/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
29218065Sadrian */ 30218065Sadrian 31218065Sadrian#include <sys/cdefs.h> 32218065Sadrian__FBSDID("$FreeBSD$"); 33218065Sadrian 34218065Sadrian/* 35218065Sadrian * Driver for the Atheros Wireless LAN controller. 36218065Sadrian * 37218065Sadrian * This software is derived from work of Atsushi Onoe; his contribution 38218065Sadrian * is greatly appreciated. 39218065Sadrian */ 40218065Sadrian 41218065Sadrian#include "opt_inet.h" 42218065Sadrian#include "opt_ath.h" 43218065Sadrian#include "opt_wlan.h" 44218065Sadrian 45218065Sadrian#include <sys/param.h> 46218065Sadrian#include <sys/systm.h> 47218065Sadrian#include <sys/sysctl.h> 48218065Sadrian#include <sys/mbuf.h> 49218065Sadrian#include <sys/malloc.h> 50218065Sadrian#include <sys/lock.h> 51218065Sadrian#include <sys/mutex.h> 52218065Sadrian#include <sys/kernel.h> 53218065Sadrian#include <sys/socket.h> 54218065Sadrian#include <sys/sockio.h> 55218065Sadrian#include <sys/errno.h> 56218065Sadrian#include <sys/callout.h> 57218065Sadrian#include <sys/bus.h> 58218065Sadrian#include <sys/endian.h> 59218065Sadrian#include <sys/kthread.h> 60218065Sadrian#include <sys/taskqueue.h> 61218065Sadrian#include <sys/priv.h> 62218065Sadrian 63218065Sadrian#include <machine/bus.h> 64218065Sadrian 65218065Sadrian#include <net/if.h> 66218065Sadrian#include <net/if_dl.h> 67218065Sadrian#include <net/if_media.h> 68218065Sadrian#include <net/if_types.h> 69218065Sadrian#include <net/if_arp.h> 70218065Sadrian#include <net/ethernet.h> 71218065Sadrian#include <net/if_llc.h> 72218065Sadrian 73218065Sadrian#include <net80211/ieee80211_var.h> 74218065Sadrian#include <net80211/ieee80211_regdomain.h> 75218065Sadrian#ifdef IEEE80211_SUPPORT_SUPERG 76218065Sadrian#include <net80211/ieee80211_superg.h> 77218065Sadrian#endif 78218065Sadrian#ifdef IEEE80211_SUPPORT_TDMA 79218065Sadrian#include <net80211/ieee80211_tdma.h> 80218065Sadrian#endif 81227364Sadrian#include <net80211/ieee80211_ht.h> 82218065Sadrian 83218065Sadrian#include <net/bpf.h> 
84218065Sadrian 85218065Sadrian#ifdef INET 86218065Sadrian#include <netinet/in.h> 87218065Sadrian#include <netinet/if_ether.h> 88218065Sadrian#endif 89218065Sadrian 90218065Sadrian#include <dev/ath/if_athvar.h> 91218065Sadrian#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 92218065Sadrian#include <dev/ath/ath_hal/ah_diagcodes.h> 93218065Sadrian 94218065Sadrian#include <dev/ath/if_ath_debug.h> 95218065Sadrian 96218065Sadrian#ifdef ATH_TX99_DIAG 97218065Sadrian#include <dev/ath/ath_tx99/ath_tx99.h> 98218065Sadrian#endif 99218065Sadrian 100218065Sadrian#include <dev/ath/if_ath_misc.h> 101218065Sadrian#include <dev/ath/if_ath_tx.h> 102218240Sadrian#include <dev/ath/if_ath_tx_ht.h> 103218065Sadrian 104242782Sadrian#ifdef ATH_DEBUG_ALQ 105242782Sadrian#include <dev/ath/if_ath_alq.h> 106242782Sadrian#endif 107242782Sadrian 108218154Sadrian/* 109227364Sadrian * How many retries to perform in software 110227364Sadrian */ 111227364Sadrian#define SWMAX_RETRIES 10 112227364Sadrian 113240946Sadrian/* 114240946Sadrian * What queue to throw the non-QoS TID traffic into 115240946Sadrian */ 116240946Sadrian#define ATH_NONQOS_TID_AC WME_AC_VO 117240946Sadrian 118241170Sadrian#if 0 119241170Sadrianstatic int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an); 120241170Sadrian#endif 121227364Sadrianstatic int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, 122227364Sadrian int tid); 123227364Sadrianstatic int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, 124227364Sadrian int tid); 125236872Sadrianstatic ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, 126236872Sadrian struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); 127227364Sadrianstatic int ath_tx_action_frame_override_queue(struct ath_softc *sc, 128227364Sadrian struct ieee80211_node *ni, struct mbuf *m0, int *tid); 129240639Sadrianstatic struct ath_buf * 130240639Sadrianath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, 
131240639Sadrian struct ath_tid *tid, struct ath_buf *bf); 132227364Sadrian 133243162Sadrian#ifdef ATH_DEBUG_ALQ 134243162Sadrianvoid 135243162Sadrianath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first) 136243162Sadrian{ 137243162Sadrian struct ath_buf *bf; 138243162Sadrian int i, n; 139243162Sadrian const char *ds; 140243162Sadrian 141243162Sadrian /* XXX we should skip out early if debugging isn't enabled! */ 142243162Sadrian bf = bf_first; 143243162Sadrian 144243162Sadrian while (bf != NULL) { 145243162Sadrian /* XXX should ensure bf_nseg > 0! */ 146243162Sadrian if (bf->bf_nseg == 0) 147243162Sadrian break; 148243162Sadrian n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; 149243162Sadrian for (i = 0, ds = (const char *) bf->bf_desc; 150243162Sadrian i < n; 151243162Sadrian i++, ds += sc->sc_tx_desclen) { 152243162Sadrian if_ath_alq_post(&sc->sc_alq, 153243162Sadrian ATH_ALQ_EDMA_TXDESC, 154243162Sadrian sc->sc_tx_desclen, 155243162Sadrian ds); 156243162Sadrian } 157243162Sadrian bf = bf->bf_next; 158243162Sadrian } 159243162Sadrian} 160243162Sadrian#endif /* ATH_DEBUG_ALQ */ 161243162Sadrian 162227364Sadrian/* 163218154Sadrian * Whether to use the 11n rate scenario functions or not 164218154Sadrian */ 165218154Sadrianstatic inline int 166218154Sadrianath_tx_is_11n(struct ath_softc *sc) 167218154Sadrian{ 168239198Sadrian return ((sc->sc_ah->ah_magic == 0x20065416) || 169239198Sadrian (sc->sc_ah->ah_magic == 0x19741014)); 170218154Sadrian} 171218154Sadrian 172227364Sadrian/* 173227364Sadrian * Obtain the current TID from the given frame. 174227364Sadrian * 175227364Sadrian * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.) 176227364Sadrian * This has implications for which AC/priority the packet is placed 177227364Sadrian * in. 
178227364Sadrian */ 179227364Sadrianstatic int 180227364Sadrianath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) 181227364Sadrian{ 182227364Sadrian const struct ieee80211_frame *wh; 183227364Sadrian int pri = M_WME_GETAC(m0); 184227364Sadrian 185227364Sadrian wh = mtod(m0, const struct ieee80211_frame *); 186227364Sadrian if (! IEEE80211_QOS_HAS_SEQ(wh)) 187227364Sadrian return IEEE80211_NONQOS_TID; 188227364Sadrian else 189227364Sadrian return WME_AC_TO_TID(pri); 190227364Sadrian} 191227364Sadrian 192240639Sadrianstatic void 193240639Sadrianath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) 194240639Sadrian{ 195240639Sadrian struct ieee80211_frame *wh; 196240639Sadrian 197240639Sadrian wh = mtod(bf->bf_m, struct ieee80211_frame *); 198240639Sadrian /* Only update/resync if needed */ 199240639Sadrian if (bf->bf_state.bfs_isretried == 0) { 200240639Sadrian wh->i_fc[1] |= IEEE80211_FC1_RETRY; 201240639Sadrian bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 202240639Sadrian BUS_DMASYNC_PREWRITE); 203240639Sadrian } 204240639Sadrian bf->bf_state.bfs_isretried = 1; 205240639Sadrian bf->bf_state.bfs_retries ++; 206240639Sadrian} 207240639Sadrian 208227364Sadrian/* 209227364Sadrian * Determine what the correct AC queue for the given frame 210227364Sadrian * should be. 211227364Sadrian * 212227364Sadrian * This code assumes that the TIDs map consistently to 213227364Sadrian * the underlying hardware (or software) ath_txq. 214227364Sadrian * Since the sender may try to set an AC which is 215227364Sadrian * arbitrary, non-QoS TIDs may end up being put on 216227364Sadrian * completely different ACs. There's no way to put a 217227364Sadrian * TID into multiple ath_txq's for scheduling, so 218227364Sadrian * for now we override the AC/TXQ selection and set 219227364Sadrian * non-QOS TID frames into the BE queue. 
220227364Sadrian * 221227364Sadrian * This may be completely incorrect - specifically, 222227364Sadrian * some management frames may end up out of order 223227364Sadrian * compared to the QoS traffic they're controlling. 224227364Sadrian * I'll look into this later. 225227364Sadrian */ 226227364Sadrianstatic int 227227364Sadrianath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) 228227364Sadrian{ 229227364Sadrian const struct ieee80211_frame *wh; 230227364Sadrian int pri = M_WME_GETAC(m0); 231227364Sadrian wh = mtod(m0, const struct ieee80211_frame *); 232227364Sadrian if (IEEE80211_QOS_HAS_SEQ(wh)) 233227364Sadrian return pri; 234227364Sadrian 235240946Sadrian return ATH_NONQOS_TID_AC; 236227364Sadrian} 237227364Sadrian 238218065Sadrianvoid 239218065Sadrianath_txfrag_cleanup(struct ath_softc *sc, 240218065Sadrian ath_bufhead *frags, struct ieee80211_node *ni) 241218065Sadrian{ 242218065Sadrian struct ath_buf *bf, *next; 243218065Sadrian 244218065Sadrian ATH_TXBUF_LOCK_ASSERT(sc); 245218065Sadrian 246227344Sadrian TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { 247218065Sadrian /* NB: bf assumed clean */ 248227344Sadrian TAILQ_REMOVE(frags, bf, bf_list); 249236993Sadrian ath_returnbuf_head(sc, bf); 250218065Sadrian ieee80211_node_decref(ni); 251218065Sadrian } 252218065Sadrian} 253218065Sadrian 254218065Sadrian/* 255218065Sadrian * Setup xmit of a fragmented frame. Allocate a buffer 256218065Sadrian * for each frag and bump the node reference count to 257218065Sadrian * reflect the held reference to be setup by ath_tx_start. 258218065Sadrian */ 259218065Sadrianint 260218065Sadrianath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, 261218065Sadrian struct mbuf *m0, struct ieee80211_node *ni) 262218065Sadrian{ 263218065Sadrian struct mbuf *m; 264218065Sadrian struct ath_buf *bf; 265218065Sadrian 266218065Sadrian ATH_TXBUF_LOCK(sc); 267218065Sadrian for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { 268237000Sadrian /* XXX non-management? 
*/ 269237000Sadrian bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 270218065Sadrian if (bf == NULL) { /* out of buffers, cleanup */ 271259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", 272234009Sadrian __func__); 273218065Sadrian ath_txfrag_cleanup(sc, frags, ni); 274218065Sadrian break; 275218065Sadrian } 276218065Sadrian ieee80211_node_incref(ni); 277227344Sadrian TAILQ_INSERT_TAIL(frags, bf, bf_list); 278218065Sadrian } 279218065Sadrian ATH_TXBUF_UNLOCK(sc); 280218065Sadrian 281227344Sadrian return !TAILQ_EMPTY(frags); 282218065Sadrian} 283218065Sadrian 284218065Sadrian/* 285218065Sadrian * Reclaim mbuf resources. For fragmented frames we 286218065Sadrian * need to claim each frag chained with m_nextpkt. 287218065Sadrian */ 288218065Sadrianvoid 289218065Sadrianath_freetx(struct mbuf *m) 290218065Sadrian{ 291218065Sadrian struct mbuf *next; 292218065Sadrian 293218065Sadrian do { 294218065Sadrian next = m->m_nextpkt; 295218065Sadrian m->m_nextpkt = NULL; 296218065Sadrian m_freem(m); 297218065Sadrian } while ((m = next) != NULL); 298218065Sadrian} 299218065Sadrian 300218065Sadrianstatic int 301218065Sadrianath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) 302218065Sadrian{ 303218065Sadrian struct mbuf *m; 304218065Sadrian int error; 305218065Sadrian 306218065Sadrian /* 307218065Sadrian * Load the DMA map so any coalescing is done. This 308218065Sadrian * also calculates the number of descriptors we need. 
309218065Sadrian */ 310218065Sadrian error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 311218065Sadrian bf->bf_segs, &bf->bf_nseg, 312218065Sadrian BUS_DMA_NOWAIT); 313218065Sadrian if (error == EFBIG) { 314218065Sadrian /* XXX packet requires too many descriptors */ 315248985Sadrian bf->bf_nseg = ATH_MAX_SCATTER + 1; 316218065Sadrian } else if (error != 0) { 317218065Sadrian sc->sc_stats.ast_tx_busdma++; 318218065Sadrian ath_freetx(m0); 319218065Sadrian return error; 320218065Sadrian } 321218065Sadrian /* 322218065Sadrian * Discard null packets and check for packets that 323218065Sadrian * require too many TX descriptors. We try to convert 324218065Sadrian * the latter to a cluster. 325218065Sadrian */ 326248985Sadrian if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ 327218065Sadrian sc->sc_stats.ast_tx_linear++; 328248985Sadrian m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); 329218065Sadrian if (m == NULL) { 330218065Sadrian ath_freetx(m0); 331218065Sadrian sc->sc_stats.ast_tx_nombuf++; 332218065Sadrian return ENOMEM; 333218065Sadrian } 334218065Sadrian m0 = m; 335218065Sadrian error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, 336218065Sadrian bf->bf_segs, &bf->bf_nseg, 337218065Sadrian BUS_DMA_NOWAIT); 338218065Sadrian if (error != 0) { 339218065Sadrian sc->sc_stats.ast_tx_busdma++; 340218065Sadrian ath_freetx(m0); 341218065Sadrian return error; 342218065Sadrian } 343248985Sadrian KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, 344218065Sadrian ("too many segments after defrag; nseg %u", bf->bf_nseg)); 345218065Sadrian } else if (bf->bf_nseg == 0) { /* null packet, discard */ 346218065Sadrian sc->sc_stats.ast_tx_nodata++; 347218065Sadrian ath_freetx(m0); 348218065Sadrian return EIO; 349218065Sadrian } 350218065Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", 351218065Sadrian __func__, m0, m0->m_pkthdr.len); 352218065Sadrian bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 353218065Sadrian 
bf->bf_m = m0; 354218065Sadrian 355218065Sadrian return 0; 356218065Sadrian} 357218065Sadrian 358227360Sadrian/* 359242656Sadrian * Chain together segments+descriptors for a frame - 11n or otherwise. 360242656Sadrian * 361242656Sadrian * For aggregates, this is called on each frame in the aggregate. 362227360Sadrian */ 363218065Sadrianstatic void 364242656Sadrianath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, 365242656Sadrian struct ath_buf *bf, int is_aggr, int is_first_subframe, 366242656Sadrian int is_last_subframe) 367218065Sadrian{ 368218065Sadrian struct ath_hal *ah = sc->sc_ah; 369242656Sadrian char *ds; 370239290Sadrian int i, bp, dsp; 371239051Sadrian HAL_DMA_ADDR bufAddrList[4]; 372239051Sadrian uint32_t segLenList[4]; 373239290Sadrian int numTxMaps = 1; 374239380Sadrian int isFirstDesc = 1; 375239051Sadrian 376238708Sadrian /* 377238708Sadrian * XXX There's txdma and txdma_mgmt; the descriptor 378238708Sadrian * sizes must match. 379238708Sadrian */ 380238708Sadrian struct ath_descdma *dd = &sc->sc_txdma; 381218065Sadrian 382218065Sadrian /* 383218065Sadrian * Fillin the remainder of the descriptor info. 384218065Sadrian */ 385239290Sadrian 386239290Sadrian /* 387248527Sadrian * We need the number of TX data pointers in each descriptor. 388248527Sadrian * EDMA and later chips support 4 TX buffers per descriptor; 389248527Sadrian * previous chips just support one. 390239290Sadrian */ 391248527Sadrian numTxMaps = sc->sc_tx_nmaps; 392239290Sadrian 393239290Sadrian /* 394239290Sadrian * For EDMA and later chips ensure the TX map is fully populated 395239290Sadrian * before advancing to the next descriptor. 
396239290Sadrian */ 397242656Sadrian ds = (char *) bf->bf_desc; 398239290Sadrian bp = dsp = 0; 399239290Sadrian bzero(bufAddrList, sizeof(bufAddrList)); 400239290Sadrian bzero(segLenList, sizeof(segLenList)); 401239290Sadrian for (i = 0; i < bf->bf_nseg; i++) { 402239290Sadrian bufAddrList[bp] = bf->bf_segs[i].ds_addr; 403239290Sadrian segLenList[bp] = bf->bf_segs[i].ds_len; 404239290Sadrian bp++; 405239051Sadrian 406239290Sadrian /* 407239290Sadrian * Go to the next segment if this isn't the last segment 408239290Sadrian * and there's space in the current TX map. 409239290Sadrian */ 410239290Sadrian if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) 411239290Sadrian continue; 412239051Sadrian 413239290Sadrian /* 414239290Sadrian * Last segment or we're out of buffer pointers. 415239290Sadrian */ 416239290Sadrian bp = 0; 417239290Sadrian 418218065Sadrian if (i == bf->bf_nseg - 1) 419239409Sadrian ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); 420218065Sadrian else 421239409Sadrian ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 422239290Sadrian bf->bf_daddr + dd->dd_descsize * (dsp + 1)); 423239051Sadrian 424239051Sadrian /* 425244109Sadrian * XXX This assumes that bfs_txq is the actual destination 426244109Sadrian * hardware queue at this point. It may not have been 427244109Sadrian * assigned, it may actually be pointing to the multicast 428244109Sadrian * software TXQ id. These must be fixed! 429239051Sadrian */ 430239409Sadrian ath_hal_filltxdesc(ah, (struct ath_desc *) ds 431239051Sadrian , bufAddrList 432239051Sadrian , segLenList 433239290Sadrian , bf->bf_descid /* XXX desc id */ 434244109Sadrian , bf->bf_state.bfs_tx_queue 435239380Sadrian , isFirstDesc /* first segment */ 436218065Sadrian , i == bf->bf_nseg - 1 /* last segment */ 437239409Sadrian , (struct ath_desc *) ds0 /* first descriptor */ 438218065Sadrian ); 439240255Sadrian 440242656Sadrian /* 441242656Sadrian * Make sure the 11n aggregate fields are cleared. 
442242656Sadrian * 443242656Sadrian * XXX TODO: this doesn't need to be called for 444242656Sadrian * aggregate frames; as it'll be called on all 445242656Sadrian * sub-frames. Since the descriptors are in 446242656Sadrian * non-cacheable memory, this leads to some 447242656Sadrian * rather slow writes on MIPS/ARM platforms. 448242656Sadrian */ 449240255Sadrian if (ath_tx_is_11n(sc)) 450240333Sadrian ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); 451240255Sadrian 452242656Sadrian /* 453242656Sadrian * If 11n is enabled, set it up as if it's an aggregate 454242656Sadrian * frame. 455242656Sadrian */ 456242656Sadrian if (is_last_subframe) { 457242656Sadrian ath_hal_set11n_aggr_last(sc->sc_ah, 458242656Sadrian (struct ath_desc *) ds); 459242656Sadrian } else if (is_aggr) { 460242656Sadrian /* 461242656Sadrian * This clears the aggrlen field; so 462242656Sadrian * the caller needs to call set_aggr_first()! 463242656Sadrian * 464242656Sadrian * XXX TODO: don't call this for the first 465242656Sadrian * descriptor in the first frame in an 466242656Sadrian * aggregate! 467242656Sadrian */ 468242656Sadrian ath_hal_set11n_aggr_middle(sc->sc_ah, 469242656Sadrian (struct ath_desc *) ds, 470242656Sadrian bf->bf_state.bfs_ndelim); 471242656Sadrian } 472239380Sadrian isFirstDesc = 0; 473239409Sadrian bf->bf_lastds = (struct ath_desc *) ds; 474239290Sadrian 475239290Sadrian /* 476239290Sadrian * Don't forget to skip to the next descriptor. 477239290Sadrian */ 478239409Sadrian ds += sc->sc_tx_desclen; 479239290Sadrian dsp++; 480239290Sadrian 481239290Sadrian /* 482239290Sadrian * .. and don't forget to blank these out! 
483239290Sadrian */ 484239290Sadrian bzero(bufAddrList, sizeof(bufAddrList)); 485239290Sadrian bzero(segLenList, sizeof(segLenList)); 486218065Sadrian } 487233990Sadrian bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); 488218154Sadrian} 489218154Sadrian 490227364Sadrian/* 491238947Sadrian * Set the rate control fields in the given descriptor based on 492238947Sadrian * the bf_state fields and node state. 493238947Sadrian * 494238947Sadrian * The bfs fields should already be set with the relevant rate 495238947Sadrian * control information, including whether MRR is to be enabled. 496238947Sadrian * 497238947Sadrian * Since the FreeBSD HAL currently sets up the first TX rate 498238947Sadrian * in ath_hal_setuptxdesc(), this will setup the MRR 499238947Sadrian * conditionally for the pre-11n chips, and call ath_buf_set_rate 500238947Sadrian * unconditionally for 11n chips. These require the 11n rate 501238947Sadrian * scenario to be set if MCS rates are enabled, so it's easier 502238947Sadrian * to just always call it. The caller can then only set rates 2, 3 503238947Sadrian * and 4 if multi-rate retry is needed. 504238947Sadrian */ 505238947Sadrianstatic void 506238947Sadrianath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 507238947Sadrian struct ath_buf *bf) 508238947Sadrian{ 509238947Sadrian struct ath_rc_series *rc = bf->bf_state.bfs_rc; 510238947Sadrian 511238947Sadrian /* If mrr is disabled, blank tries 1, 2, 3 */ 512238947Sadrian if (! bf->bf_state.bfs_ismrr) 513238947Sadrian rc[1].tries = rc[2].tries = rc[3].tries = 0; 514238947Sadrian 515243647Sadrian#if 0 516238947Sadrian /* 517243647Sadrian * If NOACK is set, just set ntries=1. 
518243647Sadrian */ 519243647Sadrian else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { 520243647Sadrian rc[1].tries = rc[2].tries = rc[3].tries = 0; 521243647Sadrian rc[0].tries = 1; 522243647Sadrian } 523243647Sadrian#endif 524243647Sadrian 525243647Sadrian /* 526238947Sadrian * Always call - that way a retried descriptor will 527238947Sadrian * have the MRR fields overwritten. 528238947Sadrian * 529238947Sadrian * XXX TODO: see if this is really needed - setting up 530238947Sadrian * the first descriptor should set the MRR fields to 0 531238947Sadrian * for us anyway. 532238947Sadrian */ 533238947Sadrian if (ath_tx_is_11n(sc)) { 534238947Sadrian ath_buf_set_rate(sc, ni, bf); 535238947Sadrian } else { 536238947Sadrian ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc 537238947Sadrian , rc[1].ratecode, rc[1].tries 538238947Sadrian , rc[2].ratecode, rc[2].tries 539238947Sadrian , rc[3].ratecode, rc[3].tries 540238947Sadrian ); 541238947Sadrian } 542238947Sadrian} 543238947Sadrian 544238947Sadrian/* 545227364Sadrian * Setup segments+descriptors for an 11n aggregate. 546227364Sadrian * bf_first is the first buffer in the aggregate. 547227364Sadrian * The descriptor list must already been linked together using 548227364Sadrian * bf->bf_next. 
549227364Sadrian */ 550227364Sadrianstatic void 551227364Sadrianath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) 552227364Sadrian{ 553227364Sadrian struct ath_buf *bf, *bf_prev = NULL; 554242656Sadrian struct ath_desc *ds0 = bf_first->bf_desc; 555227364Sadrian 556227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", 557227364Sadrian __func__, bf_first->bf_state.bfs_nframes, 558227364Sadrian bf_first->bf_state.bfs_al); 559227364Sadrian 560242951Sadrian bf = bf_first; 561242951Sadrian 562242951Sadrian if (bf->bf_state.bfs_txrate0 == 0) 563259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", 564242951Sadrian __func__, bf, 0); 565242951Sadrian if (bf->bf_state.bfs_rc[0].ratecode == 0) 566259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", 567242951Sadrian __func__, bf, 0); 568242951Sadrian 569227364Sadrian /* 570242656Sadrian * Setup all descriptors of all subframes - this will 571242656Sadrian * call ath_hal_set11naggrmiddle() on every frame. 572227364Sadrian */ 573227364Sadrian while (bf != NULL) { 574227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 575227364Sadrian "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", 576227364Sadrian __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, 577227364Sadrian SEQNO(bf->bf_state.bfs_seqno)); 578227364Sadrian 579242656Sadrian /* 580242656Sadrian * Setup the initial fields for the first descriptor - all 581242656Sadrian * the non-11n specific stuff. 
582242656Sadrian */ 583242656Sadrian ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc 584242656Sadrian , bf->bf_state.bfs_pktlen /* packet length */ 585242656Sadrian , bf->bf_state.bfs_hdrlen /* header length */ 586242656Sadrian , bf->bf_state.bfs_atype /* Atheros packet type */ 587242656Sadrian , bf->bf_state.bfs_txpower /* txpower */ 588242656Sadrian , bf->bf_state.bfs_txrate0 589242656Sadrian , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 590242656Sadrian , bf->bf_state.bfs_keyix /* key cache index */ 591242656Sadrian , bf->bf_state.bfs_txantenna /* antenna mode */ 592242656Sadrian , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ 593242656Sadrian , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 594242656Sadrian , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 595242656Sadrian ); 596227364Sadrian 597227364Sadrian /* 598242656Sadrian * First descriptor? Setup the rate control and initial 599242656Sadrian * aggregate header information. 600242656Sadrian */ 601242656Sadrian if (bf == bf_first) { 602242656Sadrian /* 603242656Sadrian * setup first desc with rate and aggr info 604242656Sadrian */ 605242656Sadrian ath_tx_set_ratectrl(sc, bf->bf_node, bf); 606242656Sadrian } 607242656Sadrian 608242656Sadrian /* 609242656Sadrian * Setup the descriptors for a multi-descriptor frame. 610242656Sadrian * This is both aggregate and non-aggregate aware. 611242656Sadrian */ 612242656Sadrian ath_tx_chaindesclist(sc, ds0, bf, 613242656Sadrian 1, /* is_aggr */ 614242656Sadrian !! (bf == bf_first), /* is_first_subframe */ 615242656Sadrian !! (bf->bf_next == NULL) /* is_last_subframe */ 616242656Sadrian ); 617242656Sadrian 618242656Sadrian if (bf == bf_first) { 619242656Sadrian /* 620242656Sadrian * Initialise the first 11n aggregate with the 621242656Sadrian * aggregate length and aggregate enable bits. 
622242656Sadrian */ 623242656Sadrian ath_hal_set11n_aggr_first(sc->sc_ah, 624242656Sadrian ds0, 625242656Sadrian bf->bf_state.bfs_al, 626242656Sadrian bf->bf_state.bfs_ndelim); 627242656Sadrian } 628242656Sadrian 629242656Sadrian /* 630227364Sadrian * Link the last descriptor of the previous frame 631227364Sadrian * to the beginning descriptor of this frame. 632227364Sadrian */ 633227364Sadrian if (bf_prev != NULL) 634238609Sadrian ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, 635238609Sadrian bf->bf_daddr); 636227364Sadrian 637227364Sadrian /* Save a copy so we can link the next descriptor in */ 638227364Sadrian bf_prev = bf; 639227364Sadrian bf = bf->bf_next; 640227364Sadrian } 641227364Sadrian 642227364Sadrian /* 643227364Sadrian * Set the first descriptor bf_lastds field to point to 644227364Sadrian * the last descriptor in the last subframe, that's where 645227364Sadrian * the status update will occur. 646227364Sadrian */ 647227364Sadrian bf_first->bf_lastds = bf_prev->bf_lastds; 648227364Sadrian 649227364Sadrian /* 650227364Sadrian * And bf_last in the first descriptor points to the end of 651227364Sadrian * the aggregate list. 652227364Sadrian */ 653227364Sadrian bf_first->bf_last = bf_prev; 654227364Sadrian 655243047Sadrian /* 656243047Sadrian * For non-AR9300 NICs, which require the rate control 657243047Sadrian * in the final descriptor - let's set that up now. 658243047Sadrian * 659243047Sadrian * This is because the filltxdesc() HAL call doesn't 660243047Sadrian * populate the last segment with rate control information 661243047Sadrian * if firstSeg is also true. For non-aggregate frames 662243047Sadrian * that is fine, as the first frame already has rate control 663243047Sadrian * info. But if the last frame in an aggregate has one 664243047Sadrian * descriptor, both firstseg and lastseg will be true and 665243047Sadrian * the rate info isn't copied. 
666243047Sadrian * 667243047Sadrian * This is inefficient on MIPS/ARM platforms that have 668243047Sadrian * non-cachable memory for TX descriptors, but we'll just 669243047Sadrian * make do for now. 670243047Sadrian * 671243047Sadrian * As to why the rate table is stashed in the last descriptor 672243047Sadrian * rather than the first descriptor? Because proctxdesc() 673243047Sadrian * is called on the final descriptor in an MPDU or A-MPDU - 674243047Sadrian * ie, the one that gets updated by the hardware upon 675243047Sadrian * completion. That way proctxdesc() doesn't need to know 676243047Sadrian * about the first _and_ last TX descriptor. 677243047Sadrian */ 678243047Sadrian ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); 679243047Sadrian 680227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); 681227364Sadrian} 682227364Sadrian 683239051Sadrian/* 684239051Sadrian * Hand-off a frame to the multicast TX queue. 685239051Sadrian * 686239051Sadrian * This is a software TXQ which will be appended to the CAB queue 687239051Sadrian * during the beacon setup code. 688239051Sadrian * 689239051Sadrian * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID 690244109Sadrian * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated 691239051Sadrian * with the actual hardware txq, or all of this will fall apart. 692239051Sadrian * 693239051Sadrian * XXX It may not be a bad idea to just stuff the QCU ID into bf_state 694244109Sadrian * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated 695239051Sadrian * correctly. 
696239051Sadrian */ 697227364Sadrianstatic void 698227364Sadrianath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, 699227364Sadrian struct ath_buf *bf) 700227364Sadrian{ 701243786Sadrian ATH_TX_LOCK_ASSERT(sc); 702243786Sadrian 703227364Sadrian KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 704227364Sadrian ("%s: busy status 0x%x", __func__, bf->bf_flags)); 705248676Sadrian 706250735Sadrian /* 707250735Sadrian * Ensure that the tx queue is the cabq, so things get 708250735Sadrian * mapped correctly. 709250735Sadrian */ 710250735Sadrian if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { 711259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, 712250735Sadrian "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 713259341Srpaulo __func__, bf, bf->bf_state.bfs_tx_queue, 714250735Sadrian txq->axq_qnum); 715250735Sadrian } 716250735Sadrian 717248676Sadrian ATH_TXQ_LOCK(txq); 718248713Sadrian if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { 719248713Sadrian struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); 720227364Sadrian struct ieee80211_frame *wh; 721227364Sadrian 722227364Sadrian /* mark previous frame */ 723248713Sadrian wh = mtod(bf_last->bf_m, struct ieee80211_frame *); 724227364Sadrian wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; 725248713Sadrian bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, 726227364Sadrian BUS_DMASYNC_PREWRITE); 727227364Sadrian 728227364Sadrian /* link descriptor */ 729248713Sadrian ath_hal_settxdesclink(sc->sc_ah, 730248713Sadrian bf_last->bf_lastds, 731248713Sadrian bf->bf_daddr); 732227364Sadrian } 733227364Sadrian ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 734248671Sadrian ATH_TXQ_UNLOCK(txq); 735227364Sadrian} 736227364Sadrian 737227364Sadrian/* 738227364Sadrian * Hand-off packet to a hardware queue. 
739227364Sadrian */ 740227364Sadrianstatic void 741229949Sadrianath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, 742229949Sadrian struct ath_buf *bf) 743227364Sadrian{ 744227364Sadrian struct ath_hal *ah = sc->sc_ah; 745250783Sadrian struct ath_buf *bf_first; 746227364Sadrian 747227364Sadrian /* 748218065Sadrian * Insert the frame on the outbound list and pass it on 749218065Sadrian * to the hardware. Multicast frames buffered for power 750218065Sadrian * save stations and transmit from the CAB queue are stored 751218065Sadrian * on a s/w only queue and loaded on to the CAB queue in 752218065Sadrian * the SWBA handler since frames only go out on DTIM and 753218065Sadrian * to avoid possible races. 754218065Sadrian */ 755243786Sadrian ATH_TX_LOCK_ASSERT(sc); 756218065Sadrian KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, 757227364Sadrian ("%s: busy status 0x%x", __func__, bf->bf_flags)); 758227364Sadrian KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, 759227364Sadrian ("ath_tx_handoff_hw called for mcast queue")); 760227364Sadrian 761250783Sadrian /* 762250783Sadrian * XXX racy, should hold the PCU lock when checking this, 763250783Sadrian * and also should ensure that the TX counter is >0! 764250783Sadrian */ 765250783Sadrian KASSERT((sc->sc_inreset_cnt == 0), 766250783Sadrian ("%s: TX during reset?\n", __func__)); 767250783Sadrian 768227651Sadrian#if 0 769227651Sadrian /* 770227651Sadrian * This causes a LOR. Find out where the PCU lock is being 771227651Sadrian * held whilst the TXQ lock is grabbed - that shouldn't 772227651Sadrian * be occuring. 
773227651Sadrian */ 774227651Sadrian ATH_PCU_LOCK(sc); 775227651Sadrian if (sc->sc_inreset_cnt) { 776227651Sadrian ATH_PCU_UNLOCK(sc); 777227651Sadrian DPRINTF(sc, ATH_DEBUG_RESET, 778227651Sadrian "%s: called with sc_in_reset != 0\n", 779227651Sadrian __func__); 780227651Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, 781227651Sadrian "%s: queued: TXDP[%u] = %p (%p) depth %d\n", 782227651Sadrian __func__, txq->axq_qnum, 783227651Sadrian (caddr_t)bf->bf_daddr, bf->bf_desc, 784227651Sadrian txq->axq_depth); 785250783Sadrian /* XXX axq_link needs to be set and updated! */ 786227651Sadrian ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 787227651Sadrian if (bf->bf_state.bfs_aggr) 788227651Sadrian txq->axq_aggr_depth++; 789227651Sadrian return; 790227651Sadrian } 791227651Sadrian ATH_PCU_UNLOCK(sc); 792227651Sadrian#endif 793227651Sadrian 794250783Sadrian ATH_TXQ_LOCK(txq); 795218065Sadrian 796250783Sadrian /* 797250783Sadrian * XXX TODO: if there's a holdingbf, then 798250783Sadrian * ATH_TXQ_PUTRUNNING should be clear. 799250783Sadrian * 800250783Sadrian * If there is a holdingbf and the list is empty, 801250783Sadrian * then axq_link should be pointing to the holdingbf. 802250783Sadrian * 803250783Sadrian * Otherwise it should point to the last descriptor 804250783Sadrian * in the last ath_buf. 805250783Sadrian * 806250783Sadrian * In any case, we should really ensure that we 807250783Sadrian * update the previous descriptor link pointer to 808250783Sadrian * this descriptor, regardless of all of the above state. 809250783Sadrian * 810250783Sadrian * For now this is captured by having axq_link point 811250783Sadrian * to either the holdingbf (if the TXQ list is empty) 812250783Sadrian * or the end of the list (if the TXQ list isn't empty.) 813250783Sadrian * I'd rather just kill axq_link here and do it as above. 814250783Sadrian */ 815240899Sadrian 816250783Sadrian /* 817250783Sadrian * Append the frame to the TX queue. 
818250783Sadrian */ 819250783Sadrian ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); 820250783Sadrian ATH_KTR(sc, ATH_KTR_TX, 3, 821250783Sadrian "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " 822250783Sadrian "depth=%d", 823250783Sadrian txq->axq_qnum, 824250783Sadrian bf, 825250783Sadrian txq->axq_depth); 826240899Sadrian 827250783Sadrian /* 828250783Sadrian * If there's a link pointer, update it. 829250783Sadrian * 830250783Sadrian * XXX we should replace this with the above logic, just 831250783Sadrian * to kill axq_link with fire. 832250783Sadrian */ 833250783Sadrian if (txq->axq_link != NULL) { 834250783Sadrian *txq->axq_link = bf->bf_daddr; 835250783Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, 836250783Sadrian "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, 837250783Sadrian txq->axq_qnum, txq->axq_link, 838250783Sadrian (caddr_t)bf->bf_daddr, bf->bf_desc, 839250783Sadrian txq->axq_depth); 840250783Sadrian ATH_KTR(sc, ATH_KTR_TX, 5, 841250783Sadrian "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " 842250783Sadrian "lastds=%d", 843250783Sadrian txq->axq_qnum, txq->axq_link, 844250783Sadrian (caddr_t)bf->bf_daddr, bf->bf_desc, 845250783Sadrian bf->bf_lastds); 846250783Sadrian } 847250783Sadrian 848250783Sadrian /* 849250783Sadrian * If we've not pushed anything into the hardware yet, 850250783Sadrian * push the head of the queue into the TxDP. 851250783Sadrian * 852250783Sadrian * Once we've started DMA, there's no guarantee that 853250783Sadrian * updating the TxDP with a new value will actually work. 854250783Sadrian * So we just don't do that - if we hit the end of the list, 855250783Sadrian * we keep that buffer around (the "holding buffer") and 856250783Sadrian * re-start DMA by updating the link pointer of _that_ 857250783Sadrian * descriptor and then restart DMA. 858250783Sadrian */ 859250783Sadrian if (! 
(txq->axq_flags & ATH_TXQ_PUTRUNNING)) { 860250783Sadrian bf_first = TAILQ_FIRST(&txq->axq_q); 861250783Sadrian txq->axq_flags |= ATH_TXQ_PUTRUNNING; 862250783Sadrian ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); 863250783Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, 864250783Sadrian "%s: TXDP[%u] = %p (%p) depth %d\n", 865250783Sadrian __func__, txq->axq_qnum, 866250783Sadrian (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 867250783Sadrian txq->axq_depth); 868250783Sadrian ATH_KTR(sc, ATH_KTR_TX, 5, 869250783Sadrian "ath_tx_handoff: TXDP[%u] = %p (%p) " 870250783Sadrian "lastds=%p depth %d", 871240899Sadrian txq->axq_qnum, 872250783Sadrian (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, 873250783Sadrian bf_first->bf_lastds, 874240899Sadrian txq->axq_depth); 875250783Sadrian } 876240899Sadrian 877250783Sadrian /* 878250783Sadrian * Ensure that the bf TXQ matches this TXQ, so later 879250783Sadrian * checking and holding buffer manipulation is sane. 880250783Sadrian */ 881250783Sadrian if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { 882259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, 883250783Sadrian "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", 884259341Srpaulo __func__, bf, bf->bf_state.bfs_tx_queue, 885250783Sadrian txq->axq_qnum); 886250783Sadrian } 887240899Sadrian 888250783Sadrian /* 889250783Sadrian * Track aggregate queue depth. 890250783Sadrian */ 891250783Sadrian if (bf->bf_state.bfs_aggr) 892250783Sadrian txq->axq_aggr_depth++; 893250735Sadrian 894250783Sadrian /* 895250783Sadrian * Update the link pointer. 896250783Sadrian */ 897250783Sadrian ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); 898250735Sadrian 899250783Sadrian /* 900250783Sadrian * Start DMA. 901250783Sadrian * 902250783Sadrian * If we wrote a TxDP above, DMA will start from here. 903250783Sadrian * 904250783Sadrian * If DMA is running, it'll do nothing. 
905250783Sadrian * 906250783Sadrian * If the DMA engine hit the end of the QCU list (ie LINK=NULL, 907250783Sadrian * or VEOL) then it stops at the last transmitted write. 908250783Sadrian * We then append a new frame by updating the link pointer 909250783Sadrian * in that descriptor and then kick TxE here; it will re-read 910250783Sadrian * that last descriptor and find the new descriptor to transmit. 911250783Sadrian * 912250783Sadrian * This is why we keep the holding descriptor around. 913250783Sadrian */ 914250783Sadrian ath_hal_txstart(ah, txq->axq_qnum); 915250783Sadrian ATH_TXQ_UNLOCK(txq); 916250783Sadrian ATH_KTR(sc, ATH_KTR_TX, 1, 917250783Sadrian "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); 918227364Sadrian} 919218065Sadrian 920227364Sadrian/* 921227364Sadrian * Restart TX DMA for the given TXQ. 922227364Sadrian * 923227364Sadrian * This must be called whether the queue is empty or not. 924227364Sadrian */ 925238930Sadrianstatic void 926238930Sadrianath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) 927227364Sadrian{ 928232707Sadrian struct ath_buf *bf, *bf_last; 929218065Sadrian 930248671Sadrian ATH_TXQ_LOCK_ASSERT(txq); 931227364Sadrian 932232707Sadrian /* XXX make this ATH_TXQ_FIRST */ 933227364Sadrian bf = TAILQ_FIRST(&txq->axq_q); 934232707Sadrian bf_last = ATH_TXQ_LAST(txq, axq_q_s); 935232707Sadrian 936227364Sadrian if (bf == NULL) 937227364Sadrian return; 938227364Sadrian 939250783Sadrian DPRINTF(sc, ATH_DEBUG_RESET, 940250783Sadrian "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", 941250783Sadrian __func__, 942250783Sadrian txq->axq_qnum, 943250783Sadrian bf, 944250783Sadrian bf_last, 945250783Sadrian (uint32_t) bf->bf_daddr); 946250783Sadrian 947250796Sadrian#ifdef ATH_DEBUG 948250783Sadrian if (sc->sc_debug & ATH_DEBUG_RESET) 949250783Sadrian ath_tx_dump(sc, txq); 950250796Sadrian#endif 951250783Sadrian 952250783Sadrian /* 953250783Sadrian * This is called from a restart, so DMA is known to be 954250783Sadrian * 
completely stopped. 955250783Sadrian */ 956250783Sadrian KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), 957250783Sadrian ("%s: Q%d: called with PUTRUNNING=1\n", 958250783Sadrian __func__, 959250783Sadrian txq->axq_qnum)); 960250783Sadrian 961250783Sadrian ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); 962250783Sadrian txq->axq_flags |= ATH_TXQ_PUTRUNNING; 963250783Sadrian 964250796Sadrian ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, 965250796Sadrian &txq->axq_link); 966250796Sadrian ath_hal_txstart(sc->sc_ah, txq->axq_qnum); 967218065Sadrian} 968218065Sadrian 969227364Sadrian/* 970227364Sadrian * Hand off a packet to the hardware (or mcast queue.) 971227364Sadrian * 972227364Sadrian * The relevant hardware txq should be locked. 973227364Sadrian */ 974227364Sadrianstatic void 975238930Sadrianath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, 976238930Sadrian struct ath_buf *bf) 977227364Sadrian{ 978243786Sadrian ATH_TX_LOCK_ASSERT(sc); 979227364Sadrian 980243162Sadrian#ifdef ATH_DEBUG_ALQ 981243162Sadrian if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) 982243162Sadrian ath_tx_alq_post(sc, bf); 983243162Sadrian#endif 984243162Sadrian 985227364Sadrian if (txq->axq_qnum == ATH_TXQ_SWQ) 986227364Sadrian ath_tx_handoff_mcast(sc, txq, bf); 987227364Sadrian else 988227364Sadrian ath_tx_handoff_hw(sc, txq, bf); 989227364Sadrian} 990227364Sadrian 991218154Sadrianstatic int 992218154Sadrianath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, 993229949Sadrian struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, 994229949Sadrian int *keyix) 995218154Sadrian{ 996233330Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, 997233330Sadrian "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", 998233330Sadrian __func__, 999233330Sadrian *hdrlen, 1000233330Sadrian *pktlen, 1001233330Sadrian isfrag, 1002233330Sadrian iswep, 1003233330Sadrian m0); 1004233330Sadrian 1005218154Sadrian if (iswep) { 1006218154Sadrian const 
struct ieee80211_cipher *cip; 1007218154Sadrian struct ieee80211_key *k; 1008218154Sadrian 1009218154Sadrian /* 1010218154Sadrian * Construct the 802.11 header+trailer for an encrypted 1011218154Sadrian * frame. The only reason this can fail is because of an 1012218154Sadrian * unknown or unsupported cipher/key type. 1013218154Sadrian */ 1014218154Sadrian k = ieee80211_crypto_encap(ni, m0); 1015218154Sadrian if (k == NULL) { 1016218154Sadrian /* 1017218154Sadrian * This can happen when the key is yanked after the 1018218154Sadrian * frame was queued. Just discard the frame; the 1019218154Sadrian * 802.11 layer counts failures and provides 1020218154Sadrian * debugging/diagnostics. 1021218154Sadrian */ 1022229949Sadrian return (0); 1023218154Sadrian } 1024218154Sadrian /* 1025218154Sadrian * Adjust the packet + header lengths for the crypto 1026218154Sadrian * additions and calculate the h/w key index. When 1027218154Sadrian * a s/w mic is done the frame will have had any mic 1028218154Sadrian * added to it prior to entry so m0->m_pkthdr.len will 1029218154Sadrian * account for it. Otherwise we need to add it to the 1030218154Sadrian * packet length. 1031218154Sadrian */ 1032218154Sadrian cip = k->wk_cipher; 1033218154Sadrian (*hdrlen) += cip->ic_header; 1034218154Sadrian (*pktlen) += cip->ic_header + cip->ic_trailer; 1035218154Sadrian /* NB: frags always have any TKIP MIC done in s/w */ 1036218154Sadrian if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) 1037218154Sadrian (*pktlen) += cip->ic_miclen; 1038218154Sadrian (*keyix) = k->wk_keyix; 1039218154Sadrian } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { 1040218154Sadrian /* 1041218154Sadrian * Use station key cache slot, if assigned. 
1042218154Sadrian */ 1043218154Sadrian (*keyix) = ni->ni_ucastkey.wk_keyix; 1044218154Sadrian if ((*keyix) == IEEE80211_KEYIX_NONE) 1045218154Sadrian (*keyix) = HAL_TXKEYIX_INVALID; 1046218154Sadrian } else 1047218154Sadrian (*keyix) = HAL_TXKEYIX_INVALID; 1048218154Sadrian 1049229949Sadrian return (1); 1050218154Sadrian} 1051218154Sadrian 1052233989Sadrian/* 1053233989Sadrian * Calculate whether interoperability protection is required for 1054233989Sadrian * this frame. 1055233989Sadrian * 1056233989Sadrian * This requires the rate control information be filled in, 1057233989Sadrian * as the protection requirement depends upon the current 1058233989Sadrian * operating mode / PHY. 1059233989Sadrian */ 1060233989Sadrianstatic void 1061233989Sadrianath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) 1062233989Sadrian{ 1063233989Sadrian struct ieee80211_frame *wh; 1064233989Sadrian uint8_t rix; 1065233989Sadrian uint16_t flags; 1066233989Sadrian int shortPreamble; 1067233989Sadrian const HAL_RATE_TABLE *rt = sc->sc_currates; 1068233989Sadrian struct ifnet *ifp = sc->sc_ifp; 1069233989Sadrian struct ieee80211com *ic = ifp->if_l2com; 1070233989Sadrian 1071233989Sadrian flags = bf->bf_state.bfs_txflags; 1072233989Sadrian rix = bf->bf_state.bfs_rc[0].rix; 1073233989Sadrian shortPreamble = bf->bf_state.bfs_shpream; 1074233989Sadrian wh = mtod(bf->bf_m, struct ieee80211_frame *); 1075233989Sadrian 1076233989Sadrian /* 1077233989Sadrian * If 802.11g protection is enabled, determine whether 1078233989Sadrian * to use RTS/CTS or just CTS. Note that this is only 1079233989Sadrian * done for OFDM unicast frames. 
1080233989Sadrian */ 1081233989Sadrian if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1082233989Sadrian rt->info[rix].phy == IEEE80211_T_OFDM && 1083233989Sadrian (flags & HAL_TXDESC_NOACK) == 0) { 1084233989Sadrian bf->bf_state.bfs_doprot = 1; 1085233989Sadrian /* XXX fragments must use CCK rates w/ protection */ 1086233989Sadrian if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { 1087233989Sadrian flags |= HAL_TXDESC_RTSENA; 1088233989Sadrian } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { 1089233989Sadrian flags |= HAL_TXDESC_CTSENA; 1090233989Sadrian } 1091233989Sadrian /* 1092233989Sadrian * For frags it would be desirable to use the 1093233989Sadrian * highest CCK rate for RTS/CTS. But stations 1094233989Sadrian * farther away may detect it at a lower CCK rate 1095233989Sadrian * so use the configured protection rate instead 1096233989Sadrian * (for now). 1097233989Sadrian */ 1098233989Sadrian sc->sc_stats.ast_tx_protect++; 1099233989Sadrian } 1100233989Sadrian 1101233989Sadrian /* 1102233989Sadrian * If 11n protection is enabled and it's a HT frame, 1103233989Sadrian * enable RTS. 1104233989Sadrian * 1105233989Sadrian * XXX ic_htprotmode or ic_curhtprotmode? 1106233989Sadrian * XXX should it_htprotmode only matter if ic_curhtprotmode 1107233989Sadrian * XXX indicates it's not a HT pure environment? 1108233989Sadrian */ 1109233989Sadrian if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && 1110233989Sadrian rt->info[rix].phy == IEEE80211_T_HT && 1111233989Sadrian (flags & HAL_TXDESC_NOACK) == 0) { 1112233989Sadrian flags |= HAL_TXDESC_RTSENA; 1113233989Sadrian sc->sc_stats.ast_tx_htprotect++; 1114233989Sadrian } 1115233989Sadrian bf->bf_state.bfs_txflags = flags; 1116233989Sadrian} 1117233989Sadrian 1118233989Sadrian/* 1119233989Sadrian * Update the frame duration given the currently selected rate. 1120233989Sadrian * 1121233989Sadrian * This also updates the frame duration value, so it will require 1122233989Sadrian * a DMA flush. 
1123233989Sadrian */ 1124233989Sadrianstatic void 1125233989Sadrianath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) 1126233989Sadrian{ 1127233989Sadrian struct ieee80211_frame *wh; 1128233989Sadrian uint8_t rix; 1129233989Sadrian uint16_t flags; 1130233989Sadrian int shortPreamble; 1131233989Sadrian struct ath_hal *ah = sc->sc_ah; 1132233989Sadrian const HAL_RATE_TABLE *rt = sc->sc_currates; 1133233989Sadrian int isfrag = bf->bf_m->m_flags & M_FRAG; 1134233989Sadrian 1135233989Sadrian flags = bf->bf_state.bfs_txflags; 1136233989Sadrian rix = bf->bf_state.bfs_rc[0].rix; 1137233989Sadrian shortPreamble = bf->bf_state.bfs_shpream; 1138233989Sadrian wh = mtod(bf->bf_m, struct ieee80211_frame *); 1139233989Sadrian 1140233989Sadrian /* 1141233989Sadrian * Calculate duration. This logically belongs in the 802.11 1142233989Sadrian * layer but it lacks sufficient information to calculate it. 1143233989Sadrian */ 1144233989Sadrian if ((flags & HAL_TXDESC_NOACK) == 0 && 1145233989Sadrian (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { 1146233989Sadrian u_int16_t dur; 1147233989Sadrian if (shortPreamble) 1148233989Sadrian dur = rt->info[rix].spAckDuration; 1149233989Sadrian else 1150233989Sadrian dur = rt->info[rix].lpAckDuration; 1151233989Sadrian if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { 1152233989Sadrian dur += dur; /* additional SIFS+ACK */ 1153233989Sadrian /* 1154233989Sadrian * Include the size of next fragment so NAV is 1155233989Sadrian * updated properly. The last fragment uses only 1156233989Sadrian * the ACK duration 1157242144Sadrian * 1158242144Sadrian * XXX TODO: ensure that the rate lookup for each 1159242144Sadrian * fragment is the same as the rate used by the 1160242144Sadrian * first fragment! 
1161233989Sadrian */ 1162251014Sadrian dur += ath_hal_computetxtime(ah, 1163251014Sadrian rt, 1164251014Sadrian bf->bf_nextfraglen, 1165251014Sadrian rix, shortPreamble); 1166233989Sadrian } 1167233989Sadrian if (isfrag) { 1168233989Sadrian /* 1169233989Sadrian * Force hardware to use computed duration for next 1170233989Sadrian * fragment by disabling multi-rate retry which updates 1171233989Sadrian * duration based on the multi-rate duration table. 1172233989Sadrian */ 1173233989Sadrian bf->bf_state.bfs_ismrr = 0; 1174233989Sadrian bf->bf_state.bfs_try0 = ATH_TXMGTTRY; 1175233989Sadrian /* XXX update bfs_rc[0].try? */ 1176233989Sadrian } 1177233989Sadrian 1178233989Sadrian /* Update the duration field itself */ 1179233989Sadrian *(u_int16_t *)wh->i_dur = htole16(dur); 1180233989Sadrian } 1181233989Sadrian} 1182233989Sadrian 1183218932Sadrianstatic uint8_t 1184218932Sadrianath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, 1185227364Sadrian int cix, int shortPreamble) 1186218157Sadrian{ 1187218932Sadrian uint8_t ctsrate; 1188218932Sadrian 1189218157Sadrian /* 1190218157Sadrian * CTS transmit rate is derived from the transmit rate 1191218157Sadrian * by looking in the h/w rate table. We must also factor 1192218157Sadrian * in whether or not a short preamble is to be used. 1193218157Sadrian */ 1194218157Sadrian /* NB: cix is set above where RTS/CTS is enabled */ 1195218157Sadrian KASSERT(cix != 0xff, ("cix not setup")); 1196218932Sadrian ctsrate = rt->info[cix].rateCode; 1197218932Sadrian 1198218932Sadrian /* XXX this should only matter for legacy rates */ 1199218932Sadrian if (shortPreamble) 1200218932Sadrian ctsrate |= rt->info[cix].shortPreamble; 1201218932Sadrian 1202229949Sadrian return (ctsrate); 1203218932Sadrian} 1204218932Sadrian 1205218932Sadrian/* 1206218932Sadrian * Calculate the RTS/CTS duration for legacy frames. 
1207218932Sadrian */ 1208218932Sadrianstatic int 1209218932Sadrianath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, 1210218932Sadrian int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, 1211218932Sadrian int flags) 1212218932Sadrian{ 1213218932Sadrian int ctsduration = 0; 1214218932Sadrian 1215218932Sadrian /* This mustn't be called for HT modes */ 1216218932Sadrian if (rt->info[cix].phy == IEEE80211_T_HT) { 1217218932Sadrian printf("%s: HT rate where it shouldn't be (0x%x)\n", 1218218932Sadrian __func__, rt->info[cix].rateCode); 1219229949Sadrian return (-1); 1220218932Sadrian } 1221218932Sadrian 1222218157Sadrian /* 1223218157Sadrian * Compute the transmit duration based on the frame 1224218157Sadrian * size and the size of an ACK frame. We call into the 1225218157Sadrian * HAL to do the computation since it depends on the 1226218157Sadrian * characteristics of the actual PHY being used. 1227218157Sadrian * 1228218157Sadrian * NB: CTS is assumed the same size as an ACK so we can 1229218157Sadrian * use the precalculated ACK durations. 
1230218157Sadrian */ 1231218157Sadrian if (shortPreamble) { 1232218157Sadrian if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1233218932Sadrian ctsduration += rt->info[cix].spAckDuration; 1234218932Sadrian ctsduration += ath_hal_computetxtime(ah, 1235218157Sadrian rt, pktlen, rix, AH_TRUE); 1236218157Sadrian if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1237218932Sadrian ctsduration += rt->info[rix].spAckDuration; 1238218157Sadrian } else { 1239218157Sadrian if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ 1240218932Sadrian ctsduration += rt->info[cix].lpAckDuration; 1241218932Sadrian ctsduration += ath_hal_computetxtime(ah, 1242218157Sadrian rt, pktlen, rix, AH_FALSE); 1243218157Sadrian if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ 1244218932Sadrian ctsduration += rt->info[rix].lpAckDuration; 1245218157Sadrian } 1246218932Sadrian 1247229949Sadrian return (ctsduration); 1248218157Sadrian} 1249218157Sadrian 1250227364Sadrian/* 1251227364Sadrian * Update the given ath_buf with updated rts/cts setup and duration 1252227364Sadrian * values. 1253227364Sadrian * 1254227364Sadrian * To support rate lookups for each software retry, the rts/cts rate 1255227364Sadrian * and cts duration must be re-calculated. 1256227364Sadrian * 1257227364Sadrian * This function assumes the RTS/CTS flags have been set as needed; 1258227364Sadrian * mrr has been disabled; and the rate control lookup has been done. 1259227364Sadrian * 1260227364Sadrian * XXX TODO: MRR need only be disabled for the pre-11n NICs. 1261227364Sadrian * XXX The 11n NICs support per-rate RTS/CTS configuration. 
1262227364Sadrian */ 1263227364Sadrianstatic void 1264227364Sadrianath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) 1265218065Sadrian{ 1266227364Sadrian uint16_t ctsduration = 0; 1267227364Sadrian uint8_t ctsrate = 0; 1268227364Sadrian uint8_t rix = bf->bf_state.bfs_rc[0].rix; 1269227364Sadrian uint8_t cix = 0; 1270227364Sadrian const HAL_RATE_TABLE *rt = sc->sc_currates; 1271227364Sadrian 1272227364Sadrian /* 1273227364Sadrian * No RTS/CTS enabled? Don't bother. 1274227364Sadrian */ 1275233966Sadrian if ((bf->bf_state.bfs_txflags & 1276227364Sadrian (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { 1277227364Sadrian /* XXX is this really needed? */ 1278227364Sadrian bf->bf_state.bfs_ctsrate = 0; 1279227364Sadrian bf->bf_state.bfs_ctsduration = 0; 1280227364Sadrian return; 1281227364Sadrian } 1282227364Sadrian 1283227364Sadrian /* 1284227364Sadrian * If protection is enabled, use the protection rix control 1285227364Sadrian * rate. Otherwise use the rate0 control rate. 1286227364Sadrian */ 1287227364Sadrian if (bf->bf_state.bfs_doprot) 1288227364Sadrian rix = sc->sc_protrix; 1289227364Sadrian else 1290227364Sadrian rix = bf->bf_state.bfs_rc[0].rix; 1291227364Sadrian 1292227364Sadrian /* 1293227364Sadrian * If the raw path has hard-coded ctsrate0 to something, 1294227364Sadrian * use it. 1295227364Sadrian */ 1296227364Sadrian if (bf->bf_state.bfs_ctsrate0 != 0) 1297227364Sadrian cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); 1298227364Sadrian else 1299227364Sadrian /* Control rate from above */ 1300227364Sadrian cix = rt->info[rix].controlRate; 1301227364Sadrian 1302227364Sadrian /* Calculate the rtscts rate for the given cix */ 1303227364Sadrian ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, 1304227364Sadrian bf->bf_state.bfs_shpream); 1305227364Sadrian 1306227364Sadrian /* The 11n chipsets do ctsduration calculations for you */ 1307227364Sadrian if (! 
ath_tx_is_11n(sc)) 1308227364Sadrian ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, 1309227364Sadrian bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, 1310233966Sadrian rt, bf->bf_state.bfs_txflags); 1311227364Sadrian 1312227364Sadrian /* Squirrel away in ath_buf */ 1313227364Sadrian bf->bf_state.bfs_ctsrate = ctsrate; 1314227364Sadrian bf->bf_state.bfs_ctsduration = ctsduration; 1315227364Sadrian 1316227364Sadrian /* 1317227364Sadrian * Must disable multi-rate retry when using RTS/CTS. 1318227364Sadrian */ 1319238961Sadrian if (!sc->sc_mrrprot) { 1320238961Sadrian bf->bf_state.bfs_ismrr = 0; 1321238961Sadrian bf->bf_state.bfs_try0 = 1322238961Sadrian bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ 1323238961Sadrian } 1324227364Sadrian} 1325227364Sadrian 1326227364Sadrian/* 1327227364Sadrian * Setup the descriptor chain for a normal or fast-frame 1328227364Sadrian * frame. 1329239051Sadrian * 1330239051Sadrian * XXX TODO: extend to include the destination hardware QCU ID. 1331239051Sadrian * Make sure that is correct. Make sure that when being added 1332239051Sadrian * to the mcastq, the CABQ QCUID is set or things will get a bit 1333239051Sadrian * odd. 
1334227364Sadrian */ 1335227364Sadrianstatic void 1336227364Sadrianath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) 1337227364Sadrian{ 1338227364Sadrian struct ath_desc *ds = bf->bf_desc; 1339227364Sadrian struct ath_hal *ah = sc->sc_ah; 1340227364Sadrian 1341242951Sadrian if (bf->bf_state.bfs_txrate0 == 0) 1342259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, 1343259341Srpaulo "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); 1344242951Sadrian 1345227364Sadrian ath_hal_setuptxdesc(ah, ds 1346227364Sadrian , bf->bf_state.bfs_pktlen /* packet length */ 1347227364Sadrian , bf->bf_state.bfs_hdrlen /* header length */ 1348227364Sadrian , bf->bf_state.bfs_atype /* Atheros packet type */ 1349227364Sadrian , bf->bf_state.bfs_txpower /* txpower */ 1350227364Sadrian , bf->bf_state.bfs_txrate0 1351227364Sadrian , bf->bf_state.bfs_try0 /* series 0 rate/tries */ 1352227364Sadrian , bf->bf_state.bfs_keyix /* key cache index */ 1353227364Sadrian , bf->bf_state.bfs_txantenna /* antenna mode */ 1354233966Sadrian , bf->bf_state.bfs_txflags /* flags */ 1355227364Sadrian , bf->bf_state.bfs_ctsrate /* rts/cts rate */ 1356227364Sadrian , bf->bf_state.bfs_ctsduration /* rts/cts duration */ 1357227364Sadrian ); 1358227364Sadrian 1359227364Sadrian /* 1360227364Sadrian * This will be overriden when the descriptor chain is written. 1361227364Sadrian */ 1362227364Sadrian bf->bf_lastds = ds; 1363227364Sadrian bf->bf_last = bf; 1364227364Sadrian 1365238947Sadrian /* Set rate control and descriptor chain for this frame */ 1366238947Sadrian ath_tx_set_ratectrl(sc, bf->bf_node, bf); 1367242656Sadrian ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); 1368227364Sadrian} 1369227364Sadrian 1370227364Sadrian/* 1371227364Sadrian * Do a rate lookup. 1372227364Sadrian * 1373227364Sadrian * This performs a rate lookup for the given ath_buf only if it's required. 1374227364Sadrian * Non-data frames and raw frames don't require it. 
1375227364Sadrian * 1376227364Sadrian * This populates the primary and MRR entries; MRR values are 1377227364Sadrian * then disabled later on if something requires it (eg RTS/CTS on 1378227364Sadrian * pre-11n chipsets. 1379227364Sadrian * 1380227364Sadrian * This needs to be done before the RTS/CTS fields are calculated 1381227364Sadrian * as they may depend upon the rate chosen. 1382227364Sadrian */ 1383227364Sadrianstatic void 1384227364Sadrianath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) 1385227364Sadrian{ 1386227364Sadrian uint8_t rate, rix; 1387227364Sadrian int try0; 1388227364Sadrian 1389227364Sadrian if (! bf->bf_state.bfs_doratelookup) 1390227364Sadrian return; 1391227364Sadrian 1392227364Sadrian /* Get rid of any previous state */ 1393227364Sadrian bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 1394227364Sadrian 1395227364Sadrian ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); 1396227364Sadrian ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, 1397227364Sadrian bf->bf_state.bfs_pktlen, &rix, &try0, &rate); 1398227364Sadrian 1399227364Sadrian /* In case MRR is disabled, make sure rc[0] is setup correctly */ 1400227364Sadrian bf->bf_state.bfs_rc[0].rix = rix; 1401227364Sadrian bf->bf_state.bfs_rc[0].ratecode = rate; 1402227364Sadrian bf->bf_state.bfs_rc[0].tries = try0; 1403227364Sadrian 1404227364Sadrian if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) 1405227364Sadrian ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, 1406227364Sadrian bf->bf_state.bfs_rc); 1407227364Sadrian ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); 1408227364Sadrian 1409227364Sadrian sc->sc_txrix = rix; /* for LED blinking */ 1410227364Sadrian sc->sc_lastdatarix = rix; /* for fast frames */ 1411227364Sadrian bf->bf_state.bfs_try0 = try0; 1412227364Sadrian bf->bf_state.bfs_txrate0 = rate; 1413227364Sadrian} 1414227364Sadrian 1415227364Sadrian/* 1416240883Sadrian * Update the CLRDMASK bit in the ath_buf if it needs to be set. 
1417240883Sadrian */ 1418240883Sadrianstatic void 1419240883Sadrianath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, 1420240883Sadrian struct ath_buf *bf) 1421240883Sadrian{ 1422245708Sadrian struct ath_node *an = ATH_NODE(bf->bf_node); 1423240883Sadrian 1424243786Sadrian ATH_TX_LOCK_ASSERT(sc); 1425240883Sadrian 1426245708Sadrian if (an->clrdmask == 1) { 1427240883Sadrian bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 1428245708Sadrian an->clrdmask = 0; 1429240883Sadrian } 1430240883Sadrian} 1431240883Sadrian 1432240883Sadrian/* 1433250665Sadrian * Return whether this frame should be software queued or 1434250665Sadrian * direct dispatched. 1435250665Sadrian * 1436250665Sadrian * When doing powersave, BAR frames should be queued but other management 1437250665Sadrian * frames should be directly sent. 1438250665Sadrian * 1439250665Sadrian * When not doing powersave, stick BAR frames into the hardware queue 1440250665Sadrian * so it goes out even though the queue is paused. 1441250665Sadrian * 1442250665Sadrian * For now, management frames are also software queued by default. 
 */
/*
 * Decide whether a frame destined to this node should be placed on the
 * software queue (return 1) or direct-dispatched to the hardware queue
 * (return 0).
 *
 * On return, *queue_to_head is set to 1 only for a BAR frame sent to a
 * node in powersave - the caller (ath_tx_swq()) must then insert the
 * frame at the head of the software queue rather than the tail.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/* Default: no head-of-queue insertion requested */
	(*queue_to_head) = 0;

	/* If the node is not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/*
		 * BAR TX whilst asleep; queue it, and ask for it to
		 * go at the head of the software queue so it's sent
		 * first once the node wakes.
		 */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else {
		/* Everything else (eg data frames): software queue */
		return (1);
	}
}


/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	/* The TX lock must be held across descriptor setup + handoff */
	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * is "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/*
	 * Handle encryption twiddling if needed; on failure the
	 * mbuf is freed here and EIO returned to the caller.
	 */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	/* Account for the FCS appended by the hardware */
	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done. This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		/* Management frames go out at the fixed mgmt rate */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;		/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		/* Honor the WME no-ACK policy for this access class */
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.  There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to insure timely replenishing of the supply needed
	 * for sending frames.  Defering interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done to aggressively can cause senders to
	 * backup.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	/* Fill in the radiotap TX header if anyone is listening */
	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}

/*
 * Queue a frame to the hardware or software queue.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so its serialised?
 *
 * XXX When sending management frames via ath_raw_xmit(),
 * should CLRDMASK be set unconditionally?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	ieee80211_seq seqno;
	uint8_t type, subtype;
	int queue_to_head;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be. If it's a non-QoS frame, the
	 * AC and TID are overridden. The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);

	txq = sc->sc_ac2q[pri];
	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/*
	 * Enforce how deep the multicast queue can grow.
	 *
	 * XXX duplicated in ath_raw_xmit().
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
		    > sc->sc_txq_mcastq_maxdepth) {
			sc->sc_stats.ast_tx_mcastq_overflow++;
			m_freem(m0);
			return (ENOBUFS);
		}
	}

	/*
	 * Enforce how deep the unicast queue can grow.
	 *
	 * If the node is in power save then we don't want
	 * the software queue to grow too deep, or a node may
	 * end up consuming all of the ath_buf entries.
	 *
	 * For now, only do this for DATA frames.
	 *
	 * We will want to cap how many management/control
	 * frames get punted to the software queue so it doesn't
	 * fill up.  But the correct solution isn't yet obvious.
	 * In any case, this check should at least let frames pass
	 * that we are direct-dispatching.
	 *
	 * XXX TODO: duplicate this to the raw xmit path!
	 */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    ATH_NODE(ni)->an_is_powersave &&
	    ATH_NODE(ni)->an_swq_depth >
	    sc->sc_txq_node_psq_maxdepth) {
		sc->sc_stats.ast_tx_node_psq_overflow++;
		m_freem(m0);
		return (ENOBUFS);
	}

	/* A-MPDU TX */
	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
	is_ampdu = is_ampdu_tx | is_ampdu_pending;

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
	    __func__, tid, pri, is_ampdu);

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
	bf->bf_state.bfs_pri = pri;

#if 1
	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast frames
	 * must be bufferd until after the beacon.
	 *
	 * TODO: we should lock the mcastq before we check the length.
	 */
	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
		txq = &avp->av_mcastq;
		/*
		 * Mark the frame as eventually belonging on the CAB
		 * queue, so the descriptor setup functions will
		 * correctly initialise the descriptor 'qcuId' field.
		 */
		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
	}
#endif

	/* Do the generic frame setup */
	/* XXX should just bzero the bf_state? */
	bf->bf_state.bfs_dobaw = 0;

	/* A-MPDU TX? Manually set sequence number */
	/*
	 * Don't do it whilst pending; the net80211 layer still
	 * assigns them.
	 */
	if (is_ampdu_tx) {
		/*
		 * Always call; this function will
		 * handle making sure that null data frames
		 * don't get a sequence number from the current
		 * TID and thus mess with the BAW.
		 */
		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);

		/*
		 * Don't add QoS NULL frames to the BAW.
		 */
		if (IEEE80211_QOS_HAS_SEQ(wh) &&
		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
			bf->bf_state.bfs_dobaw = 1;
		}
	}

	/*
	 * If needed, the sequence number has been assigned.
	 * Squirrel it away somewhere easy to get to.
	 */
	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;

	/* Is ampdu pending? fetch the seqno and print it out */
	if (is_ampdu_pending)
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: tid %d: ampdu pending, seqno %d\n",
		    __func__, tid, M_SEQNO_GET(m0));

	/* This also sets up the DMA map */
	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);

	if (r != 0)
		goto done;

	/* At this point m0 could have changed! */
	m0 = bf->bf_m;

#if 1
	/*
	 * If it's a multicast frame, do a direct-dispatch to the
	 * destination hardware queue. Don't bother software
	 * queuing it.
	 */
	/*
	 * If it's a BAR frame, do a direct dispatch to the
	 * destination hardware queue. Don't bother software
	 * queuing it, as the TID will now be paused.
	 * Sending a BAR frame can occur from the net80211 txa timer
	 * (ie, retries) or from the ath txtask (completion call.)
	 * It queues directly to hardware because the TID is paused
	 * at this point (and won't be unpaused until the BAR has
	 * either been TXed successfully or max retries has been
	 * reached.)
	 */
	/*
	 * Until things are better debugged - if this node is asleep
	 * and we're sending it a non-BAR frame, direct dispatch it.
	 * Why? Because we need to figure out what's actually being
	 * sent - eg, during reassociation/reauthentication after
	 * the node (last) disappeared whilst asleep, the driver should
	 * have unpaused/unsleep'ed the node.  So until that is
	 * sorted out, use this workaround.
	 */
	if (txq == &avp->av_mcastq) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		ath_tx_xmit_normal(sc, txq, bf);
	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
	    &queue_to_head)) {
		/* Software-queue it (possibly at the queue head) */
		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
	} else {
		/* Direct dispatch to the hardware queue */
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		ath_tx_xmit_normal(sc, txq, bf);
	}
#else
	/*
	 * For now, since there's no software queue,
	 * direct-dispatch to the hardware.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
	/*
	 * Update the current leak count if
	 * we're leaking frames; and set the
	 * MORE flag as appropriate.
2077250665Sadrian */ 2078250665Sadrian ath_tx_leak_count_update(sc, tid, bf); 2079227364Sadrian ath_tx_xmit_normal(sc, txq, bf); 2080236880Sadrian#endif 2081236880Sadriandone: 2082218065Sadrian return 0; 2083218065Sadrian} 2084218065Sadrian 2085218065Sadrianstatic int 2086218065Sadrianath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, 2087218065Sadrian struct ath_buf *bf, struct mbuf *m0, 2088218065Sadrian const struct ieee80211_bpf_params *params) 2089218065Sadrian{ 2090218065Sadrian struct ifnet *ifp = sc->sc_ifp; 2091218065Sadrian struct ieee80211com *ic = ifp->if_l2com; 2092218065Sadrian struct ath_hal *ah = sc->sc_ah; 2093218065Sadrian struct ieee80211vap *vap = ni->ni_vap; 2094218065Sadrian int error, ismcast, ismrr; 2095218065Sadrian int keyix, hdrlen, pktlen, try0, txantenna; 2096227364Sadrian u_int8_t rix, txrate; 2097218065Sadrian struct ieee80211_frame *wh; 2098227364Sadrian u_int flags; 2099218065Sadrian HAL_PKT_TYPE atype; 2100218065Sadrian const HAL_RATE_TABLE *rt; 2101218065Sadrian struct ath_desc *ds; 2102218065Sadrian u_int pri; 2103227364Sadrian int o_tid = -1; 2104227364Sadrian int do_override; 2105250665Sadrian uint8_t type, subtype; 2106250665Sadrian int queue_to_head; 2107218065Sadrian 2108243786Sadrian ATH_TX_LOCK_ASSERT(sc); 2109243786Sadrian 2110218065Sadrian wh = mtod(m0, struct ieee80211_frame *); 2111218065Sadrian ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2112218065Sadrian hdrlen = ieee80211_anyhdrsize(wh); 2113218065Sadrian /* 2114218065Sadrian * Packet length must not include any 2115218065Sadrian * pad bytes; deduct them here. 
2116218065Sadrian */ 2117218065Sadrian /* XXX honor IEEE80211_BPF_DATAPAD */ 2118218065Sadrian pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; 2119218065Sadrian 2120250665Sadrian type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2121250665Sadrian subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2122250665Sadrian 2123240899Sadrian ATH_KTR(sc, ATH_KTR_TX, 2, 2124240899Sadrian "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); 2125240899Sadrian 2126227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", 2127227364Sadrian __func__, ismcast); 2128227364Sadrian 2129236880Sadrian pri = params->ibp_pri & 3; 2130236880Sadrian /* Override pri if the frame isn't a QoS one */ 2131236880Sadrian if (! IEEE80211_QOS_HAS_SEQ(wh)) 2132236880Sadrian pri = ath_tx_getac(sc, m0); 2133236880Sadrian 2134236880Sadrian /* XXX If it's an ADDBA, override the correct queue */ 2135236880Sadrian do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); 2136236880Sadrian 2137236880Sadrian /* Map ADDBA to the correct priority */ 2138236880Sadrian if (do_override) { 2139236880Sadrian#if 0 2140259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, 2141236880Sadrian "%s: overriding tid %d pri %d -> %d\n", 2142236880Sadrian __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); 2143236880Sadrian#endif 2144236880Sadrian pri = TID_TO_WME_AC(o_tid); 2145236880Sadrian } 2146236880Sadrian 2147218154Sadrian /* Handle encryption twiddling if needed */ 2148227364Sadrian if (! ath_tx_tag_crypto(sc, ni, 2149227364Sadrian m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, 2150227364Sadrian &hdrlen, &pktlen, &keyix)) { 2151218154Sadrian ath_freetx(m0); 2152218154Sadrian return EIO; 2153218154Sadrian } 2154218154Sadrian /* packet header may have moved, reset our local pointer */ 2155218154Sadrian wh = mtod(m0, struct ieee80211_frame *); 2156218065Sadrian 2157227364Sadrian /* Do the generic frame setup */ 2158227364Sadrian /* XXX should just bzero the bf_state? 
*/ 2159227364Sadrian bf->bf_state.bfs_dobaw = 0; 2160227364Sadrian 2161218065Sadrian error = ath_tx_dmasetup(sc, bf, m0); 2162218065Sadrian if (error != 0) 2163218065Sadrian return error; 2164218065Sadrian m0 = bf->bf_m; /* NB: may have changed */ 2165218065Sadrian wh = mtod(m0, struct ieee80211_frame *); 2166218065Sadrian bf->bf_node = ni; /* NB: held reference */ 2167218065Sadrian 2168240724Sadrian /* Always enable CLRDMASK for raw frames for now.. */ 2169218065Sadrian flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ 2170218065Sadrian flags |= HAL_TXDESC_INTREQ; /* force interrupt */ 2171218065Sadrian if (params->ibp_flags & IEEE80211_BPF_RTS) 2172218065Sadrian flags |= HAL_TXDESC_RTSENA; 2173227364Sadrian else if (params->ibp_flags & IEEE80211_BPF_CTS) { 2174227364Sadrian /* XXX assume 11g/11n protection? */ 2175227364Sadrian bf->bf_state.bfs_doprot = 1; 2176218065Sadrian flags |= HAL_TXDESC_CTSENA; 2177227364Sadrian } 2178218065Sadrian /* XXX leave ismcast to injector? */ 2179218065Sadrian if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) 2180218065Sadrian flags |= HAL_TXDESC_NOACK; 2181218065Sadrian 2182218065Sadrian rt = sc->sc_currates; 2183218065Sadrian KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 2184218065Sadrian rix = ath_tx_findrix(sc, params->ibp_rate0); 2185218065Sadrian txrate = rt->info[rix].rateCode; 2186218065Sadrian if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) 2187218065Sadrian txrate |= rt->info[rix].shortPreamble; 2188218065Sadrian sc->sc_txrix = rix; 2189218065Sadrian try0 = params->ibp_try0; 2190218065Sadrian ismrr = (params->ibp_try1 != 0); 2191218065Sadrian txantenna = params->ibp_pri >> 2; 2192218065Sadrian if (txantenna == 0) /* XXX? */ 2193218065Sadrian txantenna = sc->sc_txantenna; 2194218157Sadrian 2195227364Sadrian /* 2196227364Sadrian * Since ctsrate is fixed, store it away for later 2197227364Sadrian * use when the descriptor fields are being set. 
2198227364Sadrian */ 2199227364Sadrian if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) 2200227364Sadrian bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; 2201218157Sadrian 2202218065Sadrian /* 2203218065Sadrian * NB: we mark all packets as type PSPOLL so the h/w won't 2204218065Sadrian * set the sequence number, duration, etc. 2205218065Sadrian */ 2206218065Sadrian atype = HAL_PKT_TYPE_PSPOLL; 2207218065Sadrian 2208218065Sadrian if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 2209218065Sadrian ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, 2210218065Sadrian sc->sc_hwmap[rix].ieeerate, -1); 2211218065Sadrian 2212218065Sadrian if (ieee80211_radiotap_active_vap(vap)) { 2213218065Sadrian u_int64_t tsf = ath_hal_gettsf64(ah); 2214218065Sadrian 2215218065Sadrian sc->sc_tx_th.wt_tsf = htole64(tsf); 2216218065Sadrian sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; 2217262007Skevlo if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) 2218218065Sadrian sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2219218065Sadrian if (m0->m_flags & M_FRAG) 2220218065Sadrian sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; 2221218065Sadrian sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; 2222249569Sadrian sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, 2223249569Sadrian ieee80211_get_node_txpower(ni)); 2224218065Sadrian sc->sc_tx_th.wt_antenna = sc->sc_txantenna; 2225218065Sadrian 2226218065Sadrian ieee80211_radiotap_tx(vap, m0); 2227218065Sadrian } 2228218065Sadrian 2229218065Sadrian /* 2230218065Sadrian * Formulate first tx descriptor with tx controls. 2231218065Sadrian */ 2232218065Sadrian ds = bf->bf_desc; 2233218065Sadrian /* XXX check return value? 
*/ 2234227364Sadrian 2235227364Sadrian /* Store the decided rate index values away */ 2236227364Sadrian bf->bf_state.bfs_pktlen = pktlen; 2237227364Sadrian bf->bf_state.bfs_hdrlen = hdrlen; 2238227364Sadrian bf->bf_state.bfs_atype = atype; 2239249569Sadrian bf->bf_state.bfs_txpower = MIN(params->ibp_power, 2240249569Sadrian ieee80211_get_node_txpower(ni)); 2241227364Sadrian bf->bf_state.bfs_txrate0 = txrate; 2242227364Sadrian bf->bf_state.bfs_try0 = try0; 2243227364Sadrian bf->bf_state.bfs_keyix = keyix; 2244227364Sadrian bf->bf_state.bfs_txantenna = txantenna; 2245233966Sadrian bf->bf_state.bfs_txflags = flags; 2246227364Sadrian bf->bf_state.bfs_shpream = 2247227364Sadrian !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); 2248218065Sadrian 2249239051Sadrian /* Set local packet state, used to queue packets to hardware */ 2250239051Sadrian bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); 2251244109Sadrian bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; 2252239051Sadrian bf->bf_state.bfs_pri = pri; 2253239051Sadrian 2254227364Sadrian /* XXX this should be done in ath_tx_setrate() */ 2255227364Sadrian bf->bf_state.bfs_ctsrate = 0; 2256227364Sadrian bf->bf_state.bfs_ctsduration = 0; 2257227364Sadrian bf->bf_state.bfs_ismrr = ismrr; 2258218240Sadrian 2259227364Sadrian /* Blank the legacy rate array */ 2260227364Sadrian bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); 2261218240Sadrian 2262227364Sadrian bf->bf_state.bfs_rc[0].rix = 2263227364Sadrian ath_tx_findrix(sc, params->ibp_rate0); 2264227364Sadrian bf->bf_state.bfs_rc[0].tries = try0; 2265227364Sadrian bf->bf_state.bfs_rc[0].ratecode = txrate; 2266227364Sadrian 2267227364Sadrian if (ismrr) { 2268227364Sadrian int rix; 2269227364Sadrian 2270227364Sadrian rix = ath_tx_findrix(sc, params->ibp_rate1); 2271227364Sadrian bf->bf_state.bfs_rc[1].rix = rix; 2272227364Sadrian bf->bf_state.bfs_rc[1].tries = params->ibp_try1; 2273227364Sadrian 2274227364Sadrian rix = ath_tx_findrix(sc, params->ibp_rate2); 
2275227364Sadrian bf->bf_state.bfs_rc[2].rix = rix; 2276227364Sadrian bf->bf_state.bfs_rc[2].tries = params->ibp_try2; 2277227364Sadrian 2278227364Sadrian rix = ath_tx_findrix(sc, params->ibp_rate3); 2279227364Sadrian bf->bf_state.bfs_rc[3].rix = rix; 2280227364Sadrian bf->bf_state.bfs_rc[3].tries = params->ibp_try3; 2281218065Sadrian } 2282227364Sadrian /* 2283227364Sadrian * All the required rate control decisions have been made; 2284227364Sadrian * fill in the rc flags. 2285227364Sadrian */ 2286227364Sadrian ath_tx_rate_fill_rcflags(sc, bf); 2287218065Sadrian 2288227364Sadrian /* NB: no buffered multicast in power save support */ 2289227364Sadrian 2290227364Sadrian /* 2291227364Sadrian * If we're overiding the ADDBA destination, dump directly 2292227364Sadrian * into the hardware queue, right after any pending 2293227364Sadrian * frames to that node are. 2294227364Sadrian */ 2295227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", 2296227364Sadrian __func__, do_override); 2297227364Sadrian 2298240882Sadrian#if 1 2299250665Sadrian /* 2300250665Sadrian * Put addba frames in the right place in the right TID/HWQ. 2301250665Sadrian */ 2302227364Sadrian if (do_override) { 2303240724Sadrian bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2304250665Sadrian /* 2305250665Sadrian * XXX if it's addba frames, should we be leaking 2306250665Sadrian * them out via the frame leak method? 2307250665Sadrian * XXX for now let's not risk it; but we may wish 2308250665Sadrian * to investigate this later. 
2309250665Sadrian */ 2310227364Sadrian ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2311250665Sadrian } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, 2312250665Sadrian &queue_to_head)) { 2313250665Sadrian /* Queue to software queue */ 2314250665Sadrian ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); 2315227364Sadrian } else { 2316250665Sadrian bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2317250665Sadrian ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2318227364Sadrian } 2319240882Sadrian#else 2320240882Sadrian /* Direct-dispatch to the hardware */ 2321240882Sadrian bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; 2322250665Sadrian /* 2323250665Sadrian * Update the current leak count if 2324250665Sadrian * we're leaking frames; and set the 2325250665Sadrian * MORE flag as appropriate. 2326250665Sadrian */ 2327250665Sadrian ath_tx_leak_count_update(sc, tid, bf); 2328240882Sadrian ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); 2329240882Sadrian#endif 2330218065Sadrian return 0; 2331218065Sadrian} 2332218065Sadrian 2333227364Sadrian/* 2334227364Sadrian * Send a raw frame. 2335227364Sadrian * 2336227364Sadrian * This can be called by net80211. 
2337227364Sadrian */ 2338218065Sadrianint 2339218065Sadrianath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2340218065Sadrian const struct ieee80211_bpf_params *params) 2341218065Sadrian{ 2342218065Sadrian struct ieee80211com *ic = ni->ni_ic; 2343218065Sadrian struct ifnet *ifp = ic->ic_ifp; 2344218065Sadrian struct ath_softc *sc = ifp->if_softc; 2345218065Sadrian struct ath_buf *bf; 2346232764Sadrian struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); 2347232764Sadrian int error = 0; 2348218065Sadrian 2349227651Sadrian ATH_PCU_LOCK(sc); 2350227651Sadrian if (sc->sc_inreset_cnt > 0) { 2351259341Srpaulo DPRINTF(sc, ATH_DEBUG_XMIT, 2352259341Srpaulo "%s: sc_inreset_cnt > 0; bailing\n", __func__); 2353227651Sadrian error = EIO; 2354227651Sadrian ATH_PCU_UNLOCK(sc); 2355227651Sadrian goto bad0; 2356227651Sadrian } 2357227651Sadrian sc->sc_txstart_cnt++; 2358227651Sadrian ATH_PCU_UNLOCK(sc); 2359227651Sadrian 2360242391Sadrian ATH_TX_LOCK(sc); 2361242391Sadrian 2362218065Sadrian if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { 2363218065Sadrian DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__, 2364218065Sadrian (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ? 2365218065Sadrian "!running" : "invalid"); 2366218065Sadrian m_freem(m); 2367218065Sadrian error = ENETDOWN; 2368218065Sadrian goto bad; 2369218065Sadrian } 2370232764Sadrian 2371218065Sadrian /* 2372232764Sadrian * Enforce how deep the multicast queue can grow. 2373232764Sadrian * 2374232764Sadrian * XXX duplicated in ath_tx_start(). 
2375232764Sadrian */ 2376232764Sadrian if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 2377248750Sadrian if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth 2378248750Sadrian > sc->sc_txq_mcastq_maxdepth) { 2379232764Sadrian sc->sc_stats.ast_tx_mcastq_overflow++; 2380232764Sadrian error = ENOBUFS; 2381232764Sadrian } 2382232764Sadrian 2383232764Sadrian if (error != 0) { 2384232764Sadrian m_freem(m); 2385232764Sadrian goto bad; 2386232764Sadrian } 2387232764Sadrian } 2388232764Sadrian 2389232764Sadrian /* 2390218065Sadrian * Grab a TX buffer and associated resources. 2391218065Sadrian */ 2392237000Sadrian bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 2393218065Sadrian if (bf == NULL) { 2394218065Sadrian sc->sc_stats.ast_tx_nobuf++; 2395218065Sadrian m_freem(m); 2396218065Sadrian error = ENOBUFS; 2397218065Sadrian goto bad; 2398218065Sadrian } 2399240899Sadrian ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", 2400240899Sadrian m, params, bf); 2401218065Sadrian 2402218065Sadrian if (params == NULL) { 2403218065Sadrian /* 2404218065Sadrian * Legacy path; interpret frame contents to decide 2405218065Sadrian * precisely how to send the frame. 2406218065Sadrian */ 2407218065Sadrian if (ath_tx_start(sc, ni, bf, m)) { 2408218065Sadrian error = EIO; /* XXX */ 2409218065Sadrian goto bad2; 2410218065Sadrian } 2411218065Sadrian } else { 2412218065Sadrian /* 2413218065Sadrian * Caller supplied explicit parameters to use in 2414218065Sadrian * sending the frame. 
2415218065Sadrian */ 2416218065Sadrian if (ath_tx_raw_start(sc, ni, bf, m, params)) { 2417218065Sadrian error = EIO; /* XXX */ 2418218065Sadrian goto bad2; 2419218065Sadrian } 2420218065Sadrian } 2421218065Sadrian sc->sc_wd_timer = 5; 2422218065Sadrian ifp->if_opackets++; 2423218065Sadrian sc->sc_stats.ast_tx_raw++; 2424218065Sadrian 2425242271Sadrian /* 2426242271Sadrian * Update the TIM - if there's anything queued to the 2427242271Sadrian * software queue and power save is enabled, we should 2428242271Sadrian * set the TIM. 2429242271Sadrian */ 2430242271Sadrian ath_tx_update_tim(sc, ni, 1); 2431242271Sadrian 2432243787Sadrian ATH_TX_UNLOCK(sc); 2433243787Sadrian 2434227651Sadrian ATH_PCU_LOCK(sc); 2435227651Sadrian sc->sc_txstart_cnt--; 2436227651Sadrian ATH_PCU_UNLOCK(sc); 2437227651Sadrian 2438218065Sadrian return 0; 2439218065Sadrianbad2: 2440240899Sadrian ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " 2441240899Sadrian "bf=%p", 2442240899Sadrian m, 2443240899Sadrian params, 2444240899Sadrian bf); 2445218065Sadrian ATH_TXBUF_LOCK(sc); 2446236993Sadrian ath_returnbuf_head(sc, bf); 2447218065Sadrian ATH_TXBUF_UNLOCK(sc); 2448218065Sadrianbad: 2449242391Sadrian 2450242391Sadrian ATH_TX_UNLOCK(sc); 2451242391Sadrian 2452227651Sadrian ATH_PCU_LOCK(sc); 2453227651Sadrian sc->sc_txstart_cnt--; 2454227651Sadrian ATH_PCU_UNLOCK(sc); 2455227651Sadrianbad0: 2456240899Sadrian ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", 2457240899Sadrian m, params); 2458218065Sadrian ifp->if_oerrors++; 2459218065Sadrian sc->sc_stats.ast_tx_raw_fail++; 2460218065Sadrian ieee80211_free_node(ni); 2461227651Sadrian 2462218065Sadrian return error; 2463218065Sadrian} 2464227364Sadrian 2465227364Sadrian/* Some helper functions */ 2466227364Sadrian 2467227364Sadrian/* 2468227364Sadrian * ADDBA (and potentially others) need to be placed in the same 2469227364Sadrian * hardware queue as the TID/node it's relating to. 
This is so 2470227364Sadrian * it goes out after any pending non-aggregate frames to the 2471227364Sadrian * same node/TID. 2472227364Sadrian * 2473227364Sadrian * If this isn't done, the ADDBA can go out before the frames 2474227364Sadrian * queued in hardware. Even though these frames have a sequence 2475227364Sadrian * number -earlier- than the ADDBA can be transmitted (but 2476227364Sadrian * no frames whose sequence numbers are after the ADDBA should 2477227364Sadrian * be!) they'll arrive after the ADDBA - and the receiving end 2478227364Sadrian * will simply drop them as being out of the BAW. 2479227364Sadrian * 2480227364Sadrian * The frames can't be appended to the TID software queue - it'll 2481227364Sadrian * never be sent out. So these frames have to be directly 2482227364Sadrian * dispatched to the hardware, rather than queued in software. 2483227364Sadrian * So if this function returns true, the TXQ has to be 2484227364Sadrian * overridden and it has to be directly dispatched. 2485227364Sadrian * 2486227364Sadrian * It's a dirty hack, but someone's gotta do it. 2487227364Sadrian */ 2488227364Sadrian 2489227364Sadrian/* 2490227364Sadrian * XXX doesn't belong here! 2491227364Sadrian */ 2492227364Sadrianstatic int 2493227364Sadrianieee80211_is_action(struct ieee80211_frame *wh) 2494227364Sadrian{ 2495227364Sadrian /* Type: Management frame? */ 2496227364Sadrian if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != 2497227364Sadrian IEEE80211_FC0_TYPE_MGT) 2498227364Sadrian return 0; 2499227364Sadrian 2500227364Sadrian /* Subtype: Action frame? */ 2501227364Sadrian if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != 2502227364Sadrian IEEE80211_FC0_SUBTYPE_ACTION) 2503227364Sadrian return 0; 2504227364Sadrian 2505227364Sadrian return 1; 2506227364Sadrian} 2507227364Sadrian 2508227364Sadrian#define MS(_v, _f) (((_v) & _f) >> _f##_S) 2509227364Sadrian/* 2510227364Sadrian * Return an alternate TID for ADDBA request frames. 
2511227364Sadrian * 2512227364Sadrian * Yes, this likely should be done in the net80211 layer. 2513227364Sadrian */ 2514227364Sadrianstatic int 2515227364Sadrianath_tx_action_frame_override_queue(struct ath_softc *sc, 2516227364Sadrian struct ieee80211_node *ni, 2517227364Sadrian struct mbuf *m0, int *tid) 2518227364Sadrian{ 2519227364Sadrian struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); 2520227364Sadrian struct ieee80211_action_ba_addbarequest *ia; 2521227364Sadrian uint8_t *frm; 2522227364Sadrian uint16_t baparamset; 2523227364Sadrian 2524227364Sadrian /* Not action frame? Bail */ 2525227364Sadrian if (! ieee80211_is_action(wh)) 2526227364Sadrian return 0; 2527227364Sadrian 2528227364Sadrian /* XXX Not needed for frames we send? */ 2529227364Sadrian#if 0 2530227364Sadrian /* Correct length? */ 2531227364Sadrian if (! ieee80211_parse_action(ni, m)) 2532227364Sadrian return 0; 2533227364Sadrian#endif 2534227364Sadrian 2535227364Sadrian /* Extract out action frame */ 2536227364Sadrian frm = (u_int8_t *)&wh[1]; 2537227364Sadrian ia = (struct ieee80211_action_ba_addbarequest *) frm; 2538227364Sadrian 2539227364Sadrian /* Not ADDBA? Bail */ 2540227364Sadrian if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) 2541227364Sadrian return 0; 2542227364Sadrian if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) 2543227364Sadrian return 0; 2544227364Sadrian 2545227364Sadrian /* Extract TID, return it */ 2546227364Sadrian baparamset = le16toh(ia->rq_baparamset); 2547227364Sadrian *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); 2548227364Sadrian 2549227364Sadrian return 1; 2550227364Sadrian} 2551227364Sadrian#undef MS 2552227364Sadrian 2553227364Sadrian/* Per-node software queue operations */ 2554227364Sadrian 2555227364Sadrian/* 2556227364Sadrian * Add the current packet to the given BAW. 
 * It is assumed that the current packet
 *
 * + fits inside the BAW;
 * + already has had a sequence number allocated.
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
void
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;

	ATH_TX_LOCK_ASSERT(sc);

	/* A retried frame is already tracked in the BAW; don't re-add it */
	if (bf->bf_state.bfs_isretried)
		return;

	tap = ath_tx_get_tx_tid(an, tid->tid);

	/* Shouldn't be called for frames not tracked in the BAW; warn */
	if (! bf->bf_state.bfs_dobaw) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno),
		    tap->txa_start, tap->txa_wnd);
	}

	/* Double-add is a tracking bug; warn loudly */
	if (bf->bf_state.bfs_addedbaw)
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
		    "baw head=%d tail=%d\n",
		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
		    tap->txa_start, tap->txa_wnd, tid->baw_head,
		    tid->baw_tail);

	/*
	 * Verify that the given sequence number is not outside of the
	 * BAW.  Complain loudly if that's the case.
	 */
	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
	    SEQNO(bf->bf_state.bfs_seqno))) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
		    "baw head=%d tail=%d\n",
		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
		    tap->txa_start, tap->txa_wnd, tid->baw_head,
		    tid->baw_tail);
	}

	/*
	 * ni->ni_txseqs[] is the currently allocated seqno.
	 * the txa state contains the current baw start.
	 *
	 * The slot index is the seqno's offset from the BAW left edge,
	 * taken modulo the (power-of-two) tracking array size.
	 */
	index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
	    "baw head=%d tail=%d\n",
	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
	    tid->baw_tail);


#if 0
	assert(tid->tx_buf[cindex] == NULL);
#endif
	/* An occupied slot means two in-flight frames mapped to it */
	if (tid->tx_buf[cindex] != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: ba packet dup (index=%d, cindex=%d, "
		    "head=%d, tail=%d)\n",
		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
		    __func__,
		    tid->tx_buf[cindex],
		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
		    bf,
		    SEQNO(bf->bf_state.bfs_seqno)
		);
	}
	tid->tx_buf[cindex] = bf;

	/* Grow the tail if this slot lies past the current tail */
	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * Flip the BAW buffer entry over from the existing one to the new one.
 *
 * When software retransmitting a (sub-)frame, it is entirely possible that
 * the frame ath_buf is marked as BUSY and can't be immediately reused.
 * In that instance the buffer is cloned and the new buffer is used for
 * retransmit. We thus need to update the ath_buf slot in the BAW buf
 * tracking array to maintain consistency.
 */
static void
ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;
	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);

	ATH_TX_LOCK_ASSERT(sc);

	tap = ath_tx_get_tx_tid(an, tid->tid);
	index = ATH_BA_INDEX(tap->txa_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	/*
	 * Just warn for now; if it happens then we should find out
	 * about it. It's highly likely the aggregation session will
	 * soon hang.
	 */
	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: retransmitted buffer"
		    " has mismatching seqno's, BA session may hang.\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
	}

	/* The slot should currently hold the buffer being replaced */
	if (tid->tx_buf[cindex] != old_bf) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: ath_buf pointer incorrect; "
		    " has m BA session may hang.\n", __func__);
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
	}

	tid->tx_buf[cindex] = new_bf;
}

/*
 * seq_start - left edge of BAW
 * seq_next - current/next sequence number to allocate
 *
 * Since the BAW status may be modified by both the ath task and
 * the net80211/ifnet contexts, the TID must be locked.
 */
static void
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, const struct ath_buf *bf)
{
	int index, cindex;
	struct ieee80211_tx_ampdu *tap;
	int seqno = SEQNO(bf->bf_state.bfs_seqno);

	ATH_TX_LOCK_ASSERT(sc);

	tap = ath_tx_get_tx_tid(an, tid->tid);
	index = ATH_BA_INDEX(tap->txa_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
	    "baw head=%d, tail=%d\n",
	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
	    cindex, tid->baw_head, tid->baw_tail);

	/*
	 * If this occurs then we have a big problem - something else
	 * has slid tap->txa_start along without updating the BAW
	 * tracking start/end pointers. Thus the TX BAW state is now
	 * completely busted.
	 *
	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
	 * it's quite possible that a cloned buffer is making its way
	 * here and causing it to fire off. Disable TDMA for now.
	 */
	if (tid->tx_buf[cindex] != bf) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
		    tid->tx_buf[cindex],
		    (tid->tx_buf[cindex] != NULL) ?
		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
	}

	/* Free the slot, then slide the BAW left edge over completed slots */
	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail &&
	    !tid->tx_buf[tid->baw_head]) {
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: baw is now %d:%d, baw head=%d\n",
	    __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
}

/*
 * Update the MORE-data bit of a frame being "leaked" out to a
 * power-save station, based on whether more traffic is still
 * queued for it (either in the net80211 stack or our software
 * queues), and account for the leaked frame.
 */
static void
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->an->an_leak_count > 0) {
		wh = mtod(bf->bf_m, struct ieee80211_frame *);

		/*
		 * Update MORE based on the software/net80211 queue states.
		 */
		if ((tid->an->an_stack_psq > 0)
		    || (tid->an->an_swq_depth > 0))
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		else
			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;

		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
		    __func__,
		    tid->an->an_node.ni_macaddr,
		    ":",
		    tid->an->an_leak_count,
		    tid->an->an_stack_psq,
		    tid->an->an_swq_depth,
		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));

		/*
		 * Re-sync the underlying buffer.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		tid->an->an_leak_count --;
	}
}

/*
 * Return whether this TID is currently allowed to transmit or be
 * scheduled.  A paused TID normally may not; however a TID with a
 * non-zero PS-POLL leak count is allowed to proceed so the leaked
 * frame(s) can still go out.
 */
static int
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
{

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->an->an_leak_count > 0) {
		return (1);
	}
	if (tid->paused)
		return (0);
	return (1);
}

/*
 * Mark the current node/TID as ready to TX.
 *
 * This is done to make it easy for the software scheduler to
 * find which nodes have data to send.
 *
 * The TXQ lock must be held.
 */
void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * If we are leaking out a frame to this destination
	 * for PS-POLL, ensure that we allow scheduling to
	 * occur.
	 */
	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
		return;		/* paused, can't schedule yet */

	if (tid->sched)
		return;		/* already scheduled */

	tid->sched = 1;

#if 0
	/*
	 * If this is a sleeping node we're leaking to, given
	 * it a higher priority. This is so bad for QoS it hurts.
	 */
	if (tid->an->an_leak_count) {
		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
	} else {
		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
	}
#endif

	/*
	 * We can't do the above - it'll confuse the TXQ software
	 * scheduler which will keep checking the _head_ TID
	 * in the list to see if it has traffic.  If we queue
	 * a TID to the head of the list and it doesn't transmit,
	 * we'll check it again.
	 *
	 * So, get the rest of this leaking frames support working
	 * and reliable first and _then_ optimise it so they're
	 * pushed out in front of any other pending software
	 * queued nodes.
	 */
	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

/*
 * Mark the current node as no longer needing to be polled for
 * TX packets.
 *
 * The TXQ lock must be held.
 */
static void
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
{
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	ATH_TX_LOCK_ASSERT(sc);

	if (tid->sched == 0)
		return;

	tid->sched = 0;
	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
}

/*
 * Assign a sequence number manually to the given frame.
 *
 * This should only be called for A-MPDU TX frames.
 */
static ieee80211_seq
ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211_frame *wh;
	int tid, pri;
	ieee80211_seq seqno;
	uint8_t subtype;

	/* TID lookup */
	wh = mtod(m0, struct ieee80211_frame *);
	pri = M_WME_GETAC(m0);			/* honor classification */
	tid = WME_AC_TO_TID(pri);
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));

	/* XXX Is it a control frame? Ignore */

	/* Does the packet require a sequence number? */
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return -1;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Is it a QOS NULL Data frame? Give it a sequence number from
	 * the default TID (IEEE80211_NONQOS_TID.)
	 *
	 * The RX path of everything I've looked at doesn't include the NULL
	 * data frame sequence number in the aggregation state updates, so
	 * assigning it a sequence number there will cause a BAW hole on the
	 * RX side.
	 */
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
		/* XXX no locking for this TID? This is a bit of a problem.
*/ 2925227364Sadrian seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; 2926227364Sadrian INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); 2927227364Sadrian } else { 2928227364Sadrian /* Manually assign sequence number */ 2929227364Sadrian seqno = ni->ni_txseqs[tid]; 2930227364Sadrian INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); 2931227364Sadrian } 2932227364Sadrian *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); 2933227364Sadrian M_SEQNO_SET(m0, seqno); 2934227364Sadrian 2935227364Sadrian /* Return so caller can do something with it if needed */ 2936236872Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); 2937227364Sadrian return seqno; 2938227364Sadrian} 2939227364Sadrian 2940227364Sadrian/* 2941227364Sadrian * Attempt to direct dispatch an aggregate frame to hardware. 2942227364Sadrian * If the frame is out of BAW, queue. 2943227364Sadrian * Otherwise, schedule it as a single frame. 2944227364Sadrian */ 2945227364Sadrianstatic void 2946239051Sadrianath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, 2947239051Sadrian struct ath_txq *txq, struct ath_buf *bf) 2948227364Sadrian{ 2949227364Sadrian struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; 2950227364Sadrian struct ieee80211_tx_ampdu *tap; 2951227364Sadrian 2952243786Sadrian ATH_TX_LOCK_ASSERT(sc); 2953227364Sadrian 2954227364Sadrian tap = ath_tx_get_tx_tid(an, tid->tid); 2955227364Sadrian 2956227364Sadrian /* paused? queue */ 2957250665Sadrian if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { 2958241336Sadrian ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2959233480Sadrian /* XXX don't sched - we're paused! */ 2960227364Sadrian return; 2961227364Sadrian } 2962227364Sadrian 2963227364Sadrian /* outside baw? queue */ 2964227364Sadrian if (bf->bf_state.bfs_dobaw && 2965227364Sadrian (! 
BAW_WITHIN(tap->txa_start, tap->txa_wnd, 2966227364Sadrian SEQNO(bf->bf_state.bfs_seqno)))) { 2967241336Sadrian ATH_TID_INSERT_HEAD(tid, bf, bf_list); 2968227364Sadrian ath_tx_tid_sched(sc, tid); 2969227364Sadrian return; 2970227364Sadrian } 2971227364Sadrian 2972240180Sadrian /* 2973240180Sadrian * This is a temporary check and should be removed once 2974240180Sadrian * all the relevant code paths have been fixed. 2975240180Sadrian * 2976240180Sadrian * During aggregate retries, it's possible that the head 2977240180Sadrian * frame will fail (which has the bfs_aggr and bfs_nframes 2978240180Sadrian * fields set for said aggregate) and will be retried as 2979240180Sadrian * a single frame. In this instance, the values should 2980240180Sadrian * be reset or the completion code will get upset with you. 2981240180Sadrian */ 2982240180Sadrian if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { 2983259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, 2984259341Srpaulo "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, 2985259341Srpaulo bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); 2986240180Sadrian bf->bf_state.bfs_aggr = 0; 2987240180Sadrian bf->bf_state.bfs_nframes = 1; 2988240180Sadrian } 2989240180Sadrian 2990240724Sadrian /* Update CLRDMASK just before this frame is queued */ 2991240724Sadrian ath_tx_update_clrdmask(sc, tid, bf); 2992240724Sadrian 2993227364Sadrian /* Direct dispatch to hardware */ 2994227364Sadrian ath_tx_do_ratelookup(sc, bf); 2995233989Sadrian ath_tx_calc_duration(sc, bf); 2996233989Sadrian ath_tx_calc_protection(sc, bf); 2997233989Sadrian ath_tx_set_rtscts(sc, bf); 2998227364Sadrian ath_tx_rate_fill_rcflags(sc, bf); 2999227364Sadrian ath_tx_setds(sc, bf); 3000227364Sadrian 3001227364Sadrian /* Statistics */ 3002227364Sadrian sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; 3003227364Sadrian 3004227364Sadrian /* Track per-TID hardware queue depth correctly */ 3005227364Sadrian tid->hwq_depth++; 3006227364Sadrian 3007227364Sadrian /* Add to 
BAW */
	if (bf->bf_state.bfs_dobaw) {
		ath_tx_addto_baw(sc, an, tid, bf);
		bf->bf_state.bfs_addedbaw = 1;
	}

	/* Set completion handler, multi-frame aggregate or not */
	bf->bf_comp = ath_tx_aggr_comp;

	/*
	 * Update the current leak count if
	 * we're leaking frames; and set the
	 * MORE flag as appropriate.
	 */
	ath_tx_leak_count_update(sc, tid, bf);

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Attempt to send the packet.
 * If the queue isn't busy, direct-dispatch.
 * If the queue is busy enough, queue the given packet on the
 * relevant software queue.
 */
void
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211_frame *wh;
	struct ath_tid *atid;
	int pri, tid;
	struct mbuf *m0 = bf->bf_m;

	ATH_TX_LOCK_ASSERT(sc);

	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
	wh = mtod(m0, struct ieee80211_frame *);
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);
	atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));

	/* Set local packet state, used to queue packets to hardware */
	/* XXX potentially duplicate info, re-check */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
	bf->bf_state.bfs_pri = pri;

	/*
	 * If the hardware queue isn't busy, queue it directly.
	 * If the hardware queue is busy, queue it.
	 * If the TID is paused or the traffic is outside BAW, software
	 * queue it.
	 *
	 * If the node is in power-save and we're leaking a frame,
	 * leak a single frame.
	 */
	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
		/* TID is paused, queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
		/*
		 * If the caller requested that it be sent at a high
		 * priority, queue it at the head of the list.
		 */
		if (queue_to_head)
			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
		else
			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
		/* AMPDU pending; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
		/* XXX sched? */
	} else if (ath_tx_ampdu_running(sc, an, tid)) {
		/* AMPDU running, attempt direct dispatch if possible */

		/*
		 * Always queue the frame to the tail of the list.
		 */
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);

		/*
		 * If the hardware queue isn't busy, direct dispatch
		 * the head frame in the list.  Don't schedule the
		 * TID - let it build some more frames first?
		 *
		 * When running A-MPDU, always just check the hardware
		 * queue depth against the aggregate frame limit.
		 * We don't want to burst a large number of single frames
		 * out to the hardware; we want to aggressively hold back.
		 *
		 * Otherwise, schedule the TID.
		 */
		/* XXX TXQ locking */
		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
			bf = ATH_TID_FIRST(atid);
			ATH_TID_REMOVE(atid, bf, bf_list);

			/*
			 * Ensure it's definitely treated as a non-AMPDU
			 * frame - this information may have been left
			 * over from a previous attempt.
			 */
			bf->bf_state.bfs_aggr = 0;
			bf->bf_state.bfs_nframes = 1;

			/* Queue to the hardware */
			ath_tx_xmit_aggr(sc, an, txq, bf);
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: xmit_aggr\n",
			    __func__);
		} else {
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: ampdu; swq'ing\n",
			    __func__);

			ath_tx_tid_sched(sc, atid);
		}
	/*
	 * If we're not doing A-MPDU, be prepared to direct dispatch
	 * up to both limits if possible.  This particular corner
	 * case may end up with packet starvation between aggregate
	 * traffic and non-aggregate traffic: we want to ensure
	 * that non-aggregate stations get a few frames queued to the
	 * hardware before the aggregate station(s) get their chance.
	 *
	 * So if you only ever see a couple of frames direct dispatched
	 * to the hardware from a non-AMPDU client, check both here
	 * and in the software queue dispatcher to ensure that those
	 * non-AMPDU stations get a fair chance to transmit.
	 */
	/* XXX TXQ locking */
	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
	    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
		/* AMPDU not running, attempt direct dispatch */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
		/* See if clrdmask needs to be set */
		ath_tx_update_clrdmask(sc, atid, bf);

		/*
		 * Update the current leak count if
		 * we're leaking frames; and set the
		 * MORE flag as appropriate.
		 */
		ath_tx_leak_count_update(sc, atid, bf);

		/*
		 * Dispatch the frame.
		 */
		ath_tx_xmit_normal(sc, txq, bf);
	} else {
		/* Busy; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
		ath_tx_tid_sched(sc, atid);
	}
}

/*
 * Only set the clrdmask bit if none of the nodes are currently
 * filtered.
3173245708Sadrian * 3174245708Sadrian * XXX TODO: go through all the callers and check to see 3175245708Sadrian * which are being called in the context of looping over all 3176245708Sadrian * TIDs (eg, if all tids are being paused, resumed, etc.) 3177245708Sadrian * That'll avoid O(n^2) complexity here. 3178245708Sadrian */ 3179245708Sadrianstatic void 3180245708Sadrianath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) 3181245708Sadrian{ 3182245708Sadrian int i; 3183245708Sadrian 3184245708Sadrian ATH_TX_LOCK_ASSERT(sc); 3185245708Sadrian 3186245708Sadrian for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3187245708Sadrian if (an->an_tid[i].isfiltered == 1) 3188245739Sadrian return; 3189245708Sadrian } 3190245708Sadrian an->clrdmask = 1; 3191245708Sadrian} 3192245708Sadrian 3193245708Sadrian/* 3194227364Sadrian * Configure the per-TID node state. 3195227364Sadrian * 3196227364Sadrian * This likely belongs in if_ath_node.c but I can't think of anywhere 3197227364Sadrian * else to put it just yet. 3198227364Sadrian * 3199227364Sadrian * This sets up the SLISTs and the mutex as appropriate. 3200227364Sadrian */ 3201227364Sadrianvoid 3202227364Sadrianath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) 3203227364Sadrian{ 3204227364Sadrian int i, j; 3205227364Sadrian struct ath_tid *atid; 3206227364Sadrian 3207227364Sadrian for (i = 0; i < IEEE80211_TID_SIZE; i++) { 3208227364Sadrian atid = &an->an_tid[i]; 3209240639Sadrian 3210240639Sadrian /* XXX now with this bzer(), is the field 0'ing needed? 
*/ 3211240639Sadrian bzero(atid, sizeof(*atid)); 3212240639Sadrian 3213241336Sadrian TAILQ_INIT(&atid->tid_q); 3214241336Sadrian TAILQ_INIT(&atid->filtq.tid_q); 3215227364Sadrian atid->tid = i; 3216227364Sadrian atid->an = an; 3217227364Sadrian for (j = 0; j < ATH_TID_MAX_BUFS; j++) 3218227364Sadrian atid->tx_buf[j] = NULL; 3219227364Sadrian atid->baw_head = atid->baw_tail = 0; 3220227364Sadrian atid->paused = 0; 3221227364Sadrian atid->sched = 0; 3222227364Sadrian atid->hwq_depth = 0; 3223227364Sadrian atid->cleanup_inprogress = 0; 3224227364Sadrian if (i == IEEE80211_NONQOS_TID) 3225240946Sadrian atid->ac = ATH_NONQOS_TID_AC; 3226227364Sadrian else 3227227364Sadrian atid->ac = TID_TO_WME_AC(i); 3228227364Sadrian } 3229245708Sadrian an->clrdmask = 1; /* Always start by setting this bit */ 3230227364Sadrian} 3231227364Sadrian 3232227364Sadrian/* 3233227364Sadrian * Pause the current TID. This stops packets from being transmitted 3234227364Sadrian * on it. 3235227364Sadrian * 3236227364Sadrian * Since this is also called from upper layers as well as the driver, 3237227364Sadrian * it will get the TID lock. 3238227364Sadrian */ 3239227364Sadrianstatic void 3240227364Sadrianath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) 3241227364Sadrian{ 3242233908Sadrian 3243243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3244227364Sadrian tid->paused++; 3245227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n", 3246227364Sadrian __func__, tid->paused); 3247227364Sadrian} 3248227364Sadrian 3249227364Sadrian/* 3250227364Sadrian * Unpause the current TID, and schedule it if needed. 
3251227364Sadrian */ 3252227364Sadrianstatic void 3253227364Sadrianath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) 3254227364Sadrian{ 3255243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3256243786Sadrian 3257249713Sadrian /* 3258249713Sadrian * There's some odd places where ath_tx_tid_resume() is called 3259249713Sadrian * when it shouldn't be; this works around that particular issue 3260249713Sadrian * until it's actually resolved. 3261249713Sadrian */ 3262249713Sadrian if (tid->paused == 0) { 3263259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, 3264259341Srpaulo "%s: %6D: paused=0?\n", __func__, 3265259341Srpaulo tid->an->an_node.ni_macaddr, ":"); 3266249713Sadrian } else { 3267249713Sadrian tid->paused--; 3268249713Sadrian } 3269227364Sadrian 3270227364Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n", 3271227364Sadrian __func__, tid->paused); 3272227364Sadrian 3273241170Sadrian if (tid->paused) 3274227364Sadrian return; 3275227364Sadrian 3276241170Sadrian /* 3277241170Sadrian * Override the clrdmask configuration for the next frame 3278241170Sadrian * from this TID, just to get the ball rolling. 3279241170Sadrian */ 3280245708Sadrian ath_tx_set_clrdmask(sc, tid->an); 3281241170Sadrian 3282241170Sadrian if (tid->axq_depth == 0) 3283241170Sadrian return; 3284241170Sadrian 3285240639Sadrian /* XXX isfiltered shouldn't ever be 0 at this point */ 3286240639Sadrian if (tid->isfiltered == 1) { 3287259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", 3288259341Srpaulo __func__); 3289240639Sadrian return; 3290240639Sadrian } 3291240639Sadrian 3292227364Sadrian ath_tx_tid_sched(sc, tid); 3293246450Sadrian 3294246450Sadrian /* 3295246450Sadrian * Queue the software TX scheduler. 3296246450Sadrian */ 3297246450Sadrian ath_tx_swq_kick(sc); 3298227364Sadrian} 3299227364Sadrian 3300227364Sadrian/* 3301240639Sadrian * Add the given ath_buf to the TID filtered frame list. 3302240639Sadrian * This requires the TID be filtered. 
3303240639Sadrian */ 3304240639Sadrianstatic void 3305240639Sadrianath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, 3306240639Sadrian struct ath_buf *bf) 3307240639Sadrian{ 3308240639Sadrian 3309243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3310243786Sadrian 3311259341Srpaulo if (!tid->isfiltered) 3312259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", 3313259341Srpaulo __func__); 3314240639Sadrian 3315240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); 3316240639Sadrian 3317240639Sadrian /* Set the retry bit and bump the retry counter */ 3318240639Sadrian ath_tx_set_retry(sc, bf); 3319240639Sadrian sc->sc_stats.ast_tx_swfiltered++; 3320240639Sadrian 3321241566Sadrian ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); 3322240639Sadrian} 3323240639Sadrian 3324240639Sadrian/* 3325240639Sadrian * Handle a completed filtered frame from the given TID. 3326240639Sadrian * This just enables/pauses the filtered frame state if required 3327240639Sadrian * and appends the filtered frame to the filtered queue. 3328240639Sadrian */ 3329240639Sadrianstatic void 3330240639Sadrianath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, 3331240639Sadrian struct ath_buf *bf) 3332240639Sadrian{ 3333240639Sadrian 3334243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3335240639Sadrian 3336240639Sadrian if (! tid->isfiltered) { 3337240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n", 3338240639Sadrian __func__); 3339240639Sadrian tid->isfiltered = 1; 3340240639Sadrian ath_tx_tid_pause(sc, tid); 3341240639Sadrian } 3342240639Sadrian 3343240639Sadrian /* Add the frame to the filter queue */ 3344240639Sadrian ath_tx_tid_filt_addbuf(sc, tid, bf); 3345240639Sadrian} 3346240639Sadrian 3347240639Sadrian/* 3348240639Sadrian * Complete the filtered frame TX completion. 3349240639Sadrian * 3350240639Sadrian * If there are no more frames in the hardware queue, unpause/unfilter 3351240639Sadrian * the TID if applicable. 
Otherwise we will wait for a node PS transition 3352240639Sadrian * to unfilter. 3353240639Sadrian */ 3354240639Sadrianstatic void 3355240639Sadrianath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) 3356240639Sadrian{ 3357240639Sadrian struct ath_buf *bf; 3358240639Sadrian 3359243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3360240639Sadrian 3361240639Sadrian if (tid->hwq_depth != 0) 3362240639Sadrian return; 3363240639Sadrian 3364240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n", 3365240639Sadrian __func__); 3366240639Sadrian tid->isfiltered = 0; 3367245708Sadrian /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ 3368245708Sadrian ath_tx_set_clrdmask(sc, tid->an); 3369240639Sadrian 3370240639Sadrian /* XXX this is really quite inefficient */ 3371241566Sadrian while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { 3372241566Sadrian ATH_TID_FILT_REMOVE(tid, bf, bf_list); 3373241336Sadrian ATH_TID_INSERT_HEAD(tid, bf, bf_list); 3374240639Sadrian } 3375240639Sadrian 3376240639Sadrian ath_tx_tid_resume(sc, tid); 3377240639Sadrian} 3378240639Sadrian 3379240639Sadrian/* 3380240639Sadrian * Called when a single (aggregate or otherwise) frame is completed. 3381240639Sadrian * 3382240639Sadrian * Returns 1 if the buffer could be added to the filtered list 3383240639Sadrian * (cloned or otherwise), 0 if the buffer couldn't be added to the 3384240639Sadrian * filtered list (failed clone; expired retry) and the caller should 3385240639Sadrian * free it and handle it like a failure (eg by sending a BAR.) 
3386240639Sadrian */ 3387240639Sadrianstatic int 3388240639Sadrianath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, 3389240639Sadrian struct ath_buf *bf) 3390240639Sadrian{ 3391240639Sadrian struct ath_buf *nbf; 3392240639Sadrian int retval; 3393240639Sadrian 3394243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3395240639Sadrian 3396240639Sadrian /* 3397240639Sadrian * Don't allow a filtered frame to live forever. 3398240639Sadrian */ 3399240639Sadrian if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3400241170Sadrian sc->sc_stats.ast_tx_swretrymax++; 3401240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3402240639Sadrian "%s: bf=%p, seqno=%d, exceeded retries\n", 3403240639Sadrian __func__, 3404240639Sadrian bf, 3405240639Sadrian bf->bf_state.bfs_seqno); 3406240639Sadrian return (0); 3407240639Sadrian } 3408240639Sadrian 3409240639Sadrian /* 3410240639Sadrian * A busy buffer can't be added to the retry list. 3411240639Sadrian * It needs to be cloned. 3412240639Sadrian */ 3413240639Sadrian if (bf->bf_flags & ATH_BUF_BUSY) { 3414240639Sadrian nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3415240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3416240639Sadrian "%s: busy buffer clone: %p -> %p\n", 3417240639Sadrian __func__, bf, nbf); 3418240639Sadrian } else { 3419240639Sadrian nbf = bf; 3420240639Sadrian } 3421240639Sadrian 3422240639Sadrian if (nbf == NULL) { 3423240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3424240639Sadrian "%s: busy buffer couldn't be cloned (%p)!\n", 3425240639Sadrian __func__, bf); 3426240639Sadrian retval = 1; 3427240639Sadrian } else { 3428240639Sadrian ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3429240639Sadrian retval = 0; 3430240639Sadrian } 3431240639Sadrian ath_tx_tid_filt_comp_complete(sc, tid); 3432240639Sadrian 3433240639Sadrian return (retval); 3434240639Sadrian} 3435240639Sadrian 3436240639Sadrianstatic void 3437240639Sadrianath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, 3438240639Sadrian struct ath_buf 
*bf_first, ath_bufhead *bf_q) 3439240639Sadrian{ 3440240639Sadrian struct ath_buf *bf, *bf_next, *nbf; 3441240639Sadrian 3442243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3443240639Sadrian 3444240639Sadrian bf = bf_first; 3445240639Sadrian while (bf) { 3446240639Sadrian bf_next = bf->bf_next; 3447240639Sadrian bf->bf_next = NULL; /* Remove it from the aggr list */ 3448240639Sadrian 3449240639Sadrian /* 3450240639Sadrian * Don't allow a filtered frame to live forever. 3451240639Sadrian */ 3452240639Sadrian if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { 3453247029Sadrian sc->sc_stats.ast_tx_swretrymax++; 3454240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3455240639Sadrian "%s: bf=%p, seqno=%d, exceeded retries\n", 3456240639Sadrian __func__, 3457240639Sadrian bf, 3458240639Sadrian bf->bf_state.bfs_seqno); 3459240639Sadrian TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3460240639Sadrian goto next; 3461240639Sadrian } 3462240639Sadrian 3463240639Sadrian if (bf->bf_flags & ATH_BUF_BUSY) { 3464240639Sadrian nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); 3465240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3466240639Sadrian "%s: busy buffer cloned: %p -> %p", 3467240639Sadrian __func__, bf, nbf); 3468240639Sadrian } else { 3469240639Sadrian nbf = bf; 3470240639Sadrian } 3471240639Sadrian 3472240639Sadrian /* 3473240639Sadrian * If the buffer couldn't be cloned, add it to bf_q; 3474240639Sadrian * the caller will free the buffer(s) as required. 3475240639Sadrian */ 3476240639Sadrian if (nbf == NULL) { 3477240639Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, 3478240639Sadrian "%s: buffer couldn't be cloned! 
(%p)\n", 3479240639Sadrian __func__, bf); 3480240639Sadrian TAILQ_INSERT_TAIL(bf_q, bf, bf_list); 3481240639Sadrian } else { 3482240639Sadrian ath_tx_tid_filt_comp_buf(sc, tid, nbf); 3483240639Sadrian } 3484240639Sadriannext: 3485240639Sadrian bf = bf_next; 3486240639Sadrian } 3487240639Sadrian 3488240639Sadrian ath_tx_tid_filt_comp_complete(sc, tid); 3489240639Sadrian} 3490240639Sadrian 3491240639Sadrian/* 3492233908Sadrian * Suspend the queue because we need to TX a BAR. 3493233908Sadrian */ 3494233908Sadrianstatic void 3495233908Sadrianath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) 3496233908Sadrian{ 3497233908Sadrian 3498243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3499243786Sadrian 3500235491Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3501250705Sadrian "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", 3502233908Sadrian __func__, 3503250705Sadrian tid->tid, 3504235676Sadrian tid->bar_wait, 3505235676Sadrian tid->bar_tx); 3506233908Sadrian 3507233908Sadrian /* We shouldn't be called when bar_tx is 1 */ 3508233908Sadrian if (tid->bar_tx) { 3509259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3510259341Srpaulo "%s: bar_tx is 1?!\n", __func__); 3511233908Sadrian } 3512233908Sadrian 3513233908Sadrian /* If we've already been called, just be patient. */ 3514233908Sadrian if (tid->bar_wait) 3515233908Sadrian return; 3516233908Sadrian 3517233908Sadrian /* Wait! */ 3518233908Sadrian tid->bar_wait = 1; 3519233908Sadrian 3520233908Sadrian /* Only one pause, no matter how many frames fail */ 3521233908Sadrian ath_tx_tid_pause(sc, tid); 3522233908Sadrian} 3523233908Sadrian 3524233908Sadrian/* 3525233908Sadrian * We've finished with BAR handling - either we succeeded or 3526233908Sadrian * failed. Either way, unsuspend TX. 
3527233908Sadrian */ 3528233908Sadrianstatic void 3529233908Sadrianath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) 3530233908Sadrian{ 3531233908Sadrian 3532243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3533243786Sadrian 3534235491Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3535250705Sadrian "%s: %6D: TID=%d, called\n", 3536233908Sadrian __func__, 3537250611Sadrian tid->an->an_node.ni_macaddr, 3538250611Sadrian ":", 3539250705Sadrian tid->tid); 3540233908Sadrian 3541233908Sadrian if (tid->bar_tx == 0 || tid->bar_wait == 0) { 3542259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3543250705Sadrian "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3544259341Srpaulo __func__, tid->an->an_node.ni_macaddr, ":", 3545259341Srpaulo tid->tid, tid->bar_tx, tid->bar_wait); 3546233908Sadrian } 3547233908Sadrian 3548233908Sadrian tid->bar_tx = tid->bar_wait = 0; 3549233908Sadrian ath_tx_tid_resume(sc, tid); 3550233908Sadrian} 3551233908Sadrian 3552233908Sadrian/* 3553233908Sadrian * Return whether we're ready to TX a BAR frame. 3554233908Sadrian * 3555233908Sadrian * Requires the TID lock be held. 3556233908Sadrian */ 3557233908Sadrianstatic int 3558233908Sadrianath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) 3559233908Sadrian{ 3560233908Sadrian 3561243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3562233908Sadrian 3563233908Sadrian if (tid->bar_wait == 0 || tid->hwq_depth > 0) 3564233908Sadrian return (0); 3565233908Sadrian 3566250611Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3567250705Sadrian "%s: %6D: TID=%d, bar ready\n", 3568250611Sadrian __func__, 3569250611Sadrian tid->an->an_node.ni_macaddr, 3570250611Sadrian ":", 3571250705Sadrian tid->tid); 3572235491Sadrian 3573233908Sadrian return (1); 3574233908Sadrian} 3575233908Sadrian 3576233908Sadrian/* 3577233908Sadrian * Check whether the current TID is ready to have a BAR 3578233908Sadrian * TXed and if so, do the TX. 
3579233908Sadrian * 3580233908Sadrian * Since the TID/TXQ lock can't be held during a call to 3581233908Sadrian * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, 3582233908Sadrian * sending the BAR and locking it again. 3583233908Sadrian * 3584233908Sadrian * Eventually, the code to send the BAR should be broken out 3585233908Sadrian * from this routine so the lock doesn't have to be reacquired 3586233908Sadrian * just to be immediately dropped by the caller. 3587233908Sadrian */ 3588233908Sadrianstatic void 3589233908Sadrianath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) 3590233908Sadrian{ 3591233908Sadrian struct ieee80211_tx_ampdu *tap; 3592233908Sadrian 3593243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3594233908Sadrian 3595235491Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3596250705Sadrian "%s: %6D: TID=%d, called\n", 3597233908Sadrian __func__, 3598250611Sadrian tid->an->an_node.ni_macaddr, 3599250611Sadrian ":", 3600250705Sadrian tid->tid); 3601233908Sadrian 3602233908Sadrian tap = ath_tx_get_tx_tid(tid->an, tid->tid); 3603233908Sadrian 3604233908Sadrian /* 3605233908Sadrian * This is an error condition! 
3606233908Sadrian */ 3607233908Sadrian if (tid->bar_wait == 0 || tid->bar_tx == 1) { 3608259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3609250705Sadrian "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", 3610259341Srpaulo __func__, tid->an->an_node.ni_macaddr, ":", 3611259341Srpaulo tid->tid, tid->bar_tx, tid->bar_wait); 3612233908Sadrian return; 3613233908Sadrian } 3614233908Sadrian 3615233908Sadrian /* Don't do anything if we still have pending frames */ 3616233908Sadrian if (tid->hwq_depth > 0) { 3617235491Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3618250705Sadrian "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", 3619233908Sadrian __func__, 3620250611Sadrian tid->an->an_node.ni_macaddr, 3621250611Sadrian ":", 3622250705Sadrian tid->tid, 3623233908Sadrian tid->hwq_depth); 3624233908Sadrian return; 3625233908Sadrian } 3626233908Sadrian 3627233908Sadrian /* We're now about to TX */ 3628233908Sadrian tid->bar_tx = 1; 3629233908Sadrian 3630233908Sadrian /* 3631240724Sadrian * Override the clrdmask configuration for the next frame, 3632240724Sadrian * just to get the ball rolling. 3633240724Sadrian */ 3634245708Sadrian ath_tx_set_clrdmask(sc, tid->an); 3635240724Sadrian 3636240724Sadrian /* 3637233908Sadrian * Calculate new BAW left edge, now that all frames have either 3638233908Sadrian * succeeded or failed. 3639233908Sadrian * 3640233908Sadrian * XXX verify this is _actually_ the valid value to begin at! 3641233908Sadrian */ 3642235491Sadrian DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3643250705Sadrian "%s: %6D: TID=%d, new BAW left edge=%d\n", 3644233908Sadrian __func__, 3645250611Sadrian tid->an->an_node.ni_macaddr, 3646250611Sadrian ":", 3647250705Sadrian tid->tid, 3648233908Sadrian tap->txa_start); 3649233908Sadrian 3650233908Sadrian /* Try sending the BAR frame */ 3651233908Sadrian /* We can't hold the lock here! 
*/ 3652233908Sadrian 3653243786Sadrian ATH_TX_UNLOCK(sc); 3654233908Sadrian if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { 3655233908Sadrian /* Success? Now we wait for notification that it's done */ 3656243786Sadrian ATH_TX_LOCK(sc); 3657233908Sadrian return; 3658233908Sadrian } 3659233908Sadrian 3660233908Sadrian /* Failure? For now, warn loudly and continue */ 3661243786Sadrian ATH_TX_LOCK(sc); 3662259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, 3663250705Sadrian "%s: %6D: TID=%d, failed to TX BAR, continue!\n", 3664259341Srpaulo __func__, tid->an->an_node.ni_macaddr, ":", 3665250705Sadrian tid->tid); 3666233908Sadrian ath_tx_tid_bar_unsuspend(sc, tid); 3667233908Sadrian} 3668233908Sadrian 3669240639Sadrianstatic void 3670240639Sadrianath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, 3671240639Sadrian struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) 3672240639Sadrian{ 3673233908Sadrian 3674243786Sadrian ATH_TX_LOCK_ASSERT(sc); 3675240639Sadrian 3676240639Sadrian /* 3677240639Sadrian * If the current TID is running AMPDU, update 3678240639Sadrian * the BAW. 3679240639Sadrian */ 3680240639Sadrian if (ath_tx_ampdu_running(sc, an, tid->tid) && 3681240639Sadrian bf->bf_state.bfs_dobaw) { 3682240639Sadrian /* 3683240639Sadrian * Only remove the frame from the BAW if it's 3684240639Sadrian * been transmitted at least once; this means 3685240639Sadrian * the frame was in the BAW to begin with. 3686240639Sadrian */ 3687240639Sadrian if (bf->bf_state.bfs_retries > 0) { 3688240639Sadrian ath_tx_update_baw(sc, an, tid, bf); 3689240639Sadrian bf->bf_state.bfs_dobaw = 0; 3690240639Sadrian } 3691247135Sadrian#if 0 3692240639Sadrian /* 3693240639Sadrian * This has become a non-fatal error now 3694240639Sadrian */ 3695240639Sadrian if (! 
bf->bf_state.bfs_addedbaw) 3696259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX_BAW 3697240639Sadrian "%s: wasn't added: seqno %d\n", 3698240639Sadrian __func__, SEQNO(bf->bf_state.bfs_seqno)); 3699247135Sadrian#endif 3700240639Sadrian } 3701248671Sadrian 3702248671Sadrian /* Strip it out of an aggregate list if it was in one */ 3703248671Sadrian bf->bf_next = NULL; 3704248671Sadrian 3705248671Sadrian /* Insert on the free queue to be freed by the caller */ 3706240639Sadrian TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); 3707240639Sadrian} 3708240639Sadrian 3709240639Sadrianstatic void 3710240639Sadrianath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, 3711240899Sadrian const char *pfx, struct ath_tid *tid, struct ath_buf *bf) 3712240639Sadrian{ 3713240639Sadrian struct ieee80211_node *ni = &an->an_node; 3714259341Srpaulo struct ath_txq *txq; 3715240639Sadrian struct ieee80211_tx_ampdu *tap; 3716240639Sadrian 3717259341Srpaulo txq = sc->sc_ac2q[tid->ac]; 3718240639Sadrian tap = ath_tx_get_tx_tid(an, tid->tid); 3719240639Sadrian 3720259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX, 3721254435Sadrian "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " 3722240639Sadrian "seqno=%d, retry=%d\n", 3723254435Sadrian __func__, 3724254435Sadrian pfx, 3725254435Sadrian ni->ni_macaddr, 3726254435Sadrian ":", 3727254435Sadrian bf, 3728240639Sadrian bf->bf_state.bfs_addedbaw, 3729240639Sadrian bf->bf_state.bfs_dobaw, 3730240639Sadrian SEQNO(bf->bf_state.bfs_seqno), 3731240639Sadrian bf->bf_state.bfs_retries); 3732259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX, 3733254435Sadrian "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", 3734254435Sadrian __func__, 3735254435Sadrian pfx, 3736254435Sadrian ni->ni_macaddr, 3737254435Sadrian ":", 3738254435Sadrian bf, 3739240899Sadrian txq->axq_qnum, 3740240899Sadrian txq->axq_depth, 3741240899Sadrian txq->axq_aggr_depth); 3742259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX, 3743254435Sadrian "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, 
bar_wait=%d, " 3744254435Sadrian "isfiltered=%d\n", 3745254435Sadrian __func__, 3746254435Sadrian pfx, 3747254435Sadrian ni->ni_macaddr, 3748254435Sadrian ":", 3749254435Sadrian bf, 3750240639Sadrian tid->axq_depth, 3751240639Sadrian tid->hwq_depth, 3752240639Sadrian tid->bar_wait, 3753240639Sadrian tid->isfiltered); 3754259341Srpaulo DPRINTF(sc, ATH_DEBUG_SW_TX, 3755254435Sadrian "%s: %s: %6D: tid %d: " 3756240724Sadrian "sched=%d, paused=%d, " 3757240724Sadrian "incomp=%d, baw_head=%d, " 3758240639Sadrian "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", 3759254435Sadrian __func__, 3760254435Sadrian pfx, 3761254435Sadrian ni->ni_macaddr, 3762254435Sadrian ":", 3763254435Sadrian tid->tid, 3764240724Sadrian tid->sched, tid->paused, 3765240724Sadrian tid->incomp, tid->baw_head, 3766240639Sadrian tid->baw_tail, tap == NULL ? -1 : tap->txa_start, 3767240639Sadrian ni->ni_txseqs[tid->tid]); 3768240639Sadrian 3769240639Sadrian /* XXX Dump the frame, see what it is? */ 3770262998Srpaulo if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) 3771262998Srpaulo ieee80211_dump_pkt(ni->ni_ic, 3772262998Srpaulo mtod(bf->bf_m, const uint8_t *), 3773262998Srpaulo bf->bf_m->m_len, 0, -1); 3774240639Sadrian} 3775240639Sadrian 3776233908Sadrian/* 3777227364Sadrian * Free any packets currently pending in the software TX queue. 3778227364Sadrian * 3779227364Sadrian * This will be called when a node is being deleted. 3780227364Sadrian * 3781227364Sadrian * It can also be called on an active node during an interface 3782227364Sadrian * reset or state transition. 3783227364Sadrian * 3784227364Sadrian * (From Linux/reference): 3785227364Sadrian * 3786227364Sadrian * TODO: For frame(s) that are in the retry state, we will reuse the 3787227364Sadrian * sequence number(s) without setting the retry bit. The 3788227364Sadrian * alternative is to give up on these and BAR the receiver's window 3789227364Sadrian * forward. 
 */
static void
ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, ath_bufhead *bf_cq)
{
	struct ath_buf *bf;
	struct ieee80211_tx_ampdu *tap;
	struct ieee80211_node *ni = &an->an_node;
	int t;		/* one-shot flag: print debug state once per queue */

	tap = ath_tx_get_tx_tid(an, tid->tid);

	/* Caller must hold the TX lock; we mutate TID queues and BAW state */
	ATH_TX_LOCK_ASSERT(sc);

	/* Walk the queue, free frames */
	t = 0;
	for (;;) {
		bf = ATH_TID_FIRST(tid);
		if (bf == NULL) {
			break;
		}

		/* Only log the drain state for the first frame found */
		if (t == 0) {
			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
			t = 1;
		}

		ATH_TID_REMOVE(tid, bf, bf_list);
		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
	}

	/* And now, drain the filtered frame queue */
	t = 0;
	for (;;) {
		bf = ATH_TID_FILT_FIRST(tid);
		if (bf == NULL)
			break;

		if (t == 0) {
			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
			t = 1;
		}

		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
	}

	/*
	 * Override the clrdmask configuration for the next frame
	 * in case there is some future transmission, just to get
	 * the ball rolling.
	 *
	 * This won't hurt things if the TID is about to be freed.
	 */
	ath_tx_set_clrdmask(sc, tid->an);

	/*
	 * Now that it's completed, grab the TID lock and update
	 * the sequence number and BAW window.
	 * Because sequence numbers have been assigned to frames
	 * that haven't been sent yet, it's entirely possible
	 * we'll be called with some pending frames that have not
	 * been transmitted.
	 *
	 * The cleaner solution is to do the sequence number allocation
	 * when the packet is first transmitted - and thus the "retries"
	 * check above would be enough to update the BAW/seqno.
	 */

	/* But don't do it for non-QoS TIDs */
	if (tap) {
#if 1
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    an,
		    tid->tid,
		    tap->txa_start);
#endif
		/* Resynchronise the next-seqno and BAW tail to the window */
		ni->ni_txseqs[tid->tid] = tap->txa_start;
		tid->baw_tail = tid->baw_head;
	}
}

/*
 * Reset the TID state.  This must be only called once the node has
 * had its frames flushed from this TID, to ensure that no other
 * pause / unpause logic can kick in.
 */
static void
ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
{

#if 0
	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
	tid->paused = tid->sched = tid->addba_tx_pending = 0;
	tid->incomp = tid->cleanup_inprogress = 0;
#endif

	/*
	 * If we have a bar_wait set, we need to unpause the TID
	 * here.  Otherwise once cleanup has finished, the TID won't
	 * have the right paused counter.
	 *
	 * XXX I'm not going through resume here - I don't want the
	 * node to be rescheduled just yet.  This however should be
	 * methodized!
	 */
	if (tid->bar_wait) {
		if (tid->paused > 0) {
			tid->paused --;
		}
	}

	/*
	 * XXX same with a currently filtered TID.
	 *
	 * Since this is being called during a flush, we assume that
	 * the filtered frame list is actually empty.
	 *
	 * XXX TODO: add in a check to ensure that the filtered queue
	 * depth is actually 0!
	 */
	if (tid->isfiltered) {
		if (tid->paused > 0) {
			tid->paused --;
		}
	}

	/*
	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
	 * The TID may be going through cleanup from the last association
	 * where things in the BAW are still in the hardware queue.
	 */
	tid->bar_wait = 0;
	tid->bar_tx = 0;
	tid->isfiltered = 0;
	tid->sched = 0;
	tid->addba_tx_pending = 0;

	/*
	 * XXX TODO: it may just be enough to walk the HWQs and mark
	 * frames for that node as non-aggregate; or mark the ath_node
	 * with something that indicates that aggregation is no longer
	 * occurring.  Then we can just toss the BAW complaints and
	 * do a complete hard reset of state here - no pause, no
	 * complete counter, etc.
	 */

}

/*
 * Flush all software queued packets for the given node.
 *
 * This occurs when a completion handler frees the last buffer
 * for a node, and the node is thus freed. This causes the node
 * to be cleaned up, which ends up calling ath_tx_node_flush.
3949227364Sadrian */ 3950227364Sadrianvoid 3951227364Sadrianath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) 3952227364Sadrian{ 3953227364Sadrian int tid; 3954227364Sadrian ath_bufhead bf_cq; 3955227364Sadrian struct ath_buf *bf; 3956227364Sadrian 3957227364Sadrian TAILQ_INIT(&bf_cq); 3958227364Sadrian 3959240899Sadrian ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", 3960240899Sadrian &an->an_node); 3961240899Sadrian 3962243786Sadrian ATH_TX_LOCK(sc); 3963250611Sadrian DPRINTF(sc, ATH_DEBUG_NODE, 3964250611Sadrian "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " 3965250665Sadrian "swq_depth=%d, clrdmask=%d, leak_count=%d\n", 3966250611Sadrian __func__, 3967250611Sadrian an->an_node.ni_macaddr, 3968250611Sadrian ":", 3969250611Sadrian an->an_is_powersave, 3970250611Sadrian an->an_stack_psq, 3971250611Sadrian an->an_tim_set, 3972250611Sadrian an->an_swq_depth, 3973250665Sadrian an->clrdmask, 3974250665Sadrian an->an_leak_count); 3975250611Sadrian 3976227364Sadrian for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { 3977227364Sadrian struct ath_tid *atid = &an->an_tid[tid]; 3978227364Sadrian 3979227364Sadrian /* Free packets */ 3980227364Sadrian ath_tx_tid_drain(sc, an, atid, &bf_cq); 3981250665Sadrian 3982240914Sadrian /* Remove this tid from the list of active tids */ 3983240914Sadrian ath_tx_tid_unsched(sc, atid); 3984250665Sadrian 3985250608Sadrian /* Reset the per-TID pause, BAR, etc state */ 3986250608Sadrian ath_tx_tid_reset(sc, atid); 3987227364Sadrian } 3988250665Sadrian 3989250665Sadrian /* 3990250665Sadrian * Clear global leak count 3991250665Sadrian */ 3992250665Sadrian an->an_leak_count = 0; 3993243786Sadrian ATH_TX_UNLOCK(sc); 3994227364Sadrian 3995227364Sadrian /* Handle completed frames */ 3996227364Sadrian while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 3997227364Sadrian TAILQ_REMOVE(&bf_cq, bf, bf_list); 3998227364Sadrian ath_tx_default_comp(sc, bf, 0); 3999227364Sadrian } 4000227364Sadrian} 4001227364Sadrian 
4002227364Sadrian/* 4003227364Sadrian * Drain all the software TXQs currently with traffic queued. 4004227364Sadrian */ 4005227364Sadrianvoid 4006227364Sadrianath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) 4007227364Sadrian{ 4008227364Sadrian struct ath_tid *tid; 4009227364Sadrian ath_bufhead bf_cq; 4010227364Sadrian struct ath_buf *bf; 4011227364Sadrian 4012227364Sadrian TAILQ_INIT(&bf_cq); 4013243786Sadrian ATH_TX_LOCK(sc); 4014227364Sadrian 4015227364Sadrian /* 4016227364Sadrian * Iterate over all active tids for the given txq, 4017227364Sadrian * flushing and unsched'ing them 4018227364Sadrian */ 4019227364Sadrian while (! TAILQ_EMPTY(&txq->axq_tidq)) { 4020227364Sadrian tid = TAILQ_FIRST(&txq->axq_tidq); 4021227364Sadrian ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); 4022227364Sadrian ath_tx_tid_unsched(sc, tid); 4023227364Sadrian } 4024227364Sadrian 4025243786Sadrian ATH_TX_UNLOCK(sc); 4026227364Sadrian 4027227364Sadrian while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 4028227364Sadrian TAILQ_REMOVE(&bf_cq, bf, bf_list); 4029227364Sadrian ath_tx_default_comp(sc, bf, 0); 4030227364Sadrian } 4031227364Sadrian} 4032227364Sadrian 4033227364Sadrian/* 4034227364Sadrian * Handle completion of non-aggregate session frames. 4035240883Sadrian * 4036240883Sadrian * This (currently) doesn't implement software retransmission of 4037240883Sadrian * non-aggregate frames! 4038240883Sadrian * 4039240883Sadrian * Software retransmission of non-aggregate frames needs to obey 4040240883Sadrian * the strict sequence number ordering, and drop any frames that 4041240883Sadrian * will fail this. 4042240883Sadrian * 4043240883Sadrian * For now, filtered frames and frame transmission will cause 4044240883Sadrian * all kinds of issues. So we don't support them. 4045240883Sadrian * 4046240883Sadrian * So anyone queuing frames via ath_tx_normal_xmit() or 4047240883Sadrian * ath_tx_hw_queue_norm() must override and set CLRDMASK. 
 */
void
ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;

	/* The TID state is protected behind the TXQ lock */
	ATH_TX_LOCK(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
	    __func__, bf, fail, atid->hwq_depth - 1);

	/* This frame is no longer on the hardware queue */
	atid->hwq_depth--;

#if 0
	/*
	 * If the frame was filtered, stick it on the filter frame
	 * queue and complain about it.  It shouldn't happen!
	 */
	if ((ts->ts_status & HAL_TXERR_FILT) ||
	    (ts->ts_status != 0 && atid->isfiltered)) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
		    __func__,
		    atid->isfiltered,
		    ts->ts_status);
		ath_tx_tid_filt_comp_buf(sc, atid, bf);
	}
#endif
	/* These two are "shouldn't happen" sanity checks, log-only */
	if (atid->isfiltered)
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
	if (atid->hwq_depth < 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);

	/*
	 * If the queue is filtered, potentially mark it as complete
	 * and reschedule it as needed.
	 *
	 * This is required as there may be a subsequent TX descriptor
	 * for this end-node that has CLRDMASK set, so it's quite possible
	 * that a filtered frame will be followed by a non-filtered
	 * (complete or otherwise) frame.
	 *
	 * XXX should we do this before we complete the frame?
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);
	ATH_TX_UNLOCK(sc);

	/*
	 * punt to rate control if we're not being cleaned up
	 * during a hw queue drain and the frame wanted an ACK.
	 */
	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
		    ts, bf->bf_state.bfs_pktlen,
		    1, (ts->ts_status == 0) ? 0 : 1);

	ath_tx_default_comp(sc, bf, fail);
}

/*
 * Handle cleanup of aggregate session packets that aren't
 * an A-MPDU.
 *
 * There's no need to update the BAW here - the session is being
 * torn down.
 */
static void
ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
	    __func__, tid, atid->incomp);

	/*
	 * Each completed frame decrements the outstanding cleanup
	 * count; the last one ends the cleanup and unpauses the TID.
	 */
	ATH_TX_LOCK(sc);
	atid->incomp--;
	if (atid->incomp == 0) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleaned up! resume!\n",
		    __func__, tid);
		atid->cleanup_inprogress = 0;
		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* Complete with fail=0; caller-visible completion path */
	ath_tx_default_comp(sc, bf, 0);
}

/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 *
 * - Discard all retry frames from the s/w queue.
 * - Fix the tx completion function for all buffers in s/w queue.
 * - Count the number of unacked frames, and let transmit completion
 *   handle it later.
 *
 * The caller is responsible for pausing the TID and unpausing the
 * TID if no cleanup was required. Otherwise the cleanup path will
 * unpause the TID once the last hardware queued frame is completed.
 */
static void
ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
    ath_bufhead *bf_cq)
{
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;
	struct ath_buf *bf, *bf_next;

	ATH_TX_LOCK_ASSERT(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
	    "%s: TID %d: called\n", __func__, tid);

	/*
	 * Move the filtered frames to the TX queue, before
	 * we run off and discard/process things.
	 */
	/* XXX this is really quite inefficient */
	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	}

	/*
	 * Update the frames in the software TX queue:
	 *
	 * + Discard retry frames in the queue
	 * + Fix the completion function to be non-aggregate
	 */
	bf = ATH_TID_FIRST(atid);
	while (bf) {
		if (bf->bf_state.bfs_isretried) {
			bf_next = TAILQ_NEXT(bf, bf_list);
			ATH_TID_REMOVE(atid, bf, bf_list);
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (!bf->bf_state.bfs_addedbaw)
					DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
					    "%s: wasn't added: seqno %d\n",
					    __func__,
					    SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
			/*
			 * Queue for the caller to complete; upper levels
			 * are then suitably notified about the discard.
			 */
			TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
			bf = bf_next;
			continue;
		}
		/* Give these the default completion handler */
		bf->bf_comp = ath_tx_normal_comp;
		bf = TAILQ_NEXT(bf, bf_list);
	}

	/*
	 * Calculate what hardware-queued frames exist based
	 * on the current BAW size. Ie, what frames have been
	 * added to the TX hardware queue for this TID but
	 * not yet ACKed.
	 */
	tap = ath_tx_get_tx_tid(an, tid);
	/* Need the lock - fiddling with BAW */
	while (atid->baw_head != atid->baw_tail) {
		if (atid->tx_buf[atid->baw_head]) {
			/* Frame still outstanding on hardware */
			atid->incomp++;
			atid->cleanup_inprogress = 1;
			atid->tx_buf[atid->baw_head] = NULL;
		}
		INCR(atid->baw_head, ATH_TID_MAX_BUFS);
		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
	}

	if (atid->cleanup_inprogress)
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleanup needed: %d packets\n",
		    __func__, tid, atid->incomp);

	/* Owner now must free completed frames */
}

/*
 * Clone a busy buffer so it can be retried.
 *
 * Returns the clone (and frees the original) on success; returns
 * NULL and leaves the original untouched-but-returned-to-the-free-list
 * semantics as coded below on failure.
 */
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf)
{
	struct ath_buf *nbf;
	int error;

	/*
	 * Clone the buffer.  This will handle the dma unmap and
	 * copy the node reference to the new buffer.  If this
	 * works out, 'bf' will have no DMA mapping, no mbuf
	 * pointer and no node reference.
	 */
	nbf = ath_buf_clone(sc, bf);

#if 0
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
	    __func__);
#endif

	if (nbf == NULL) {
		/* Failed to clone */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: failed to clone a busy buffer\n",
		    __func__);
		return NULL;
	}

	/* Setup the dma for the new buffer */
	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
	if (error != 0) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: failed to setup dma for clone\n",
		    __func__);
		/*
		 * Put this at the head of the list, not tail;
		 * that way it doesn't interfere with the
		 * busy buffer logic (which uses the tail of
		 * the list.)
		 */
		ATH_TXBUF_LOCK(sc);
		ath_returnbuf_head(sc, nbf);
		ATH_TXBUF_UNLOCK(sc);
		return NULL;
	}

	/* Update BAW if required, before we free the original buf */
	if (bf->bf_state.bfs_dobaw)
		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);

	/* Free original buffer; return new buffer */
	ath_freebuf(sc, bf);

	return nbf;
}

/*
 * Handle retrying an unaggregate frame in an aggregate
 * session.
 *
 * If too many retries occur, pause the TID, wait for
 * any further retransmits (as there's no reason why
 * non-aggregate frames in an aggregate session are
 * transmitted in-order; they just have to be in-BAW)
 * and then queue a BAR.
 */
static void
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ieee80211_tx_ampdu *tap;

	ATH_TX_LOCK(sc);

	tap = ath_tx_get_tx_tid(an, tid);

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it.  Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, an, atid, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: exceeded retries; seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		sc->sc_stats.ast_tx_swretrymax++;

		/* Update BAW anyway */
		if (bf->bf_state.bfs_dobaw) {
			ath_tx_update_baw(sc, an, atid, bf);
			if (! bf->bf_state.bfs_addedbaw)
				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
		}
		bf->bf_state.bfs_dobaw = 0;

		/* Suspend the TX queue and get ready to send the BAR */
		ath_tx_tid_bar_suspend(sc, atid);

		/* Send the BAR if there are no other frames waiting */
		if (ath_tx_tid_bar_tx_ready(sc, atid))
			ath_tx_tid_bar_tx(sc, atid);

		ATH_TX_UNLOCK(sc);

		/* Free buffer, bf is free after this call */
		ath_tx_default_comp(sc, bf, 0);
		return;
	}

	/*
	 * This increments the retry counter as well as
	 * sets the retry flag in the ath_buf and packet
	 * body.
	 */
	ath_tx_set_retry(sc, bf);
	sc->sc_stats.ast_tx_swretries++;

	/*
	 * Insert this at the head of the queue, so it's
	 * retried before any current/subsequent frames.
	 */
	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	ath_tx_tid_sched(sc, atid);
	/* Send the BAR if there are no other frames waiting */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);
}

/*
 * Common code for aggregate excessive retry/subframe retry.
 * If retrying, queues buffers to bf_q.  If not, frees the
 * buffers.
 *
 * XXX should unify this with ath_tx_aggr_retry_unaggr()
 *
 * Returns 1 if the buffer exhausted its retries (caller must free it),
 * 0 if it was queued on bf_q for retry.
 */
static int
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
    ath_bufhead *bf_q)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	ATH_TX_LOCK_ASSERT(sc);

	/* XXX clr11naggr should be done for all subframes */
	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);

	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */

	/*
	 * If the buffer is marked as busy, we can't directly
	 * reuse it.  Instead, try to clone the buffer.
	 * If the clone is successful, recycle the old buffer.
	 * If the clone is unsuccessful, set bfs_retries to max
	 * to force the next bit of code to free the buffer
	 * for us.
	 */
	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
	    (bf->bf_flags & ATH_BUF_BUSY)) {
		struct ath_buf *nbf;
		nbf = ath_tx_retry_clone(sc, an, atid, bf);
		if (nbf)
			/* bf has been freed at this point */
			bf = nbf;
		else
			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
	}

	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
		sc->sc_stats.ast_tx_swretrymax++;
		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
		    "%s: max retries: seqno %d\n",
		    __func__, SEQNO(bf->bf_state.bfs_seqno));
		/* Slide the BAW past this frame before it's dropped */
		ath_tx_update_baw(sc, an, atid, bf);
		if (!bf->bf_state.bfs_addedbaw)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
		bf->bf_state.bfs_dobaw = 0;
		return 1;
	}

	ath_tx_set_retry(sc, bf);
	sc->sc_stats.ast_tx_swretries++;
	bf->bf_next = NULL;		/* Just to make sure */

	/* Clear the aggregate state */
	bf->bf_state.bfs_aggr = 0;
	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
	bf->bf_state.bfs_nframes = 1;

	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
	return 0;
}

/*
 * error pkt completion for an aggregate destination
 */
static void
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
    struct ath_tid *tid)
{
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_buf *bf_next, *bf;
	ath_bufhead bf_q;	/* subframes being requeued for retry */
	int drops = 0;
	struct ieee80211_tx_ampdu *tap;
	ath_bufhead bf_cq;	/* subframes that exhausted retries */

	TAILQ_INIT(&bf_q);
	TAILQ_INIT(&bf_cq);

	/*
	 * Update rate control - all frames have failed.
	 *
	 * XXX use the length in the first frame in the series;
	 * XXX just so things are consistent for now.
	 */
	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
	    &bf_first->bf_status.ds_txstat,
	    bf_first->bf_state.bfs_pktlen,
	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);

	ATH_TX_LOCK(sc);
	tap = ath_tx_get_tx_tid(an, tid->tid);
	sc->sc_stats.ast_tx_aggr_failall++;

	/* Retry all subframes */
	bf = bf_first;
	while (bf) {
		bf_next = bf->bf_next;
		bf->bf_next = NULL;	/* Remove it from the aggr list */
		sc->sc_stats.ast_tx_aggr_fail++;
		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
			drops++;
			bf->bf_next = NULL;
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
		}
		bf = bf_next;
	}

	/* Prepend all frames to the beginning of the queue */
	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
		TAILQ_REMOVE(&bf_q, bf, bf_list);
		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
	}

	/*
	 * Schedule the TID to be re-tried.
	 */
	ath_tx_tid_sched(sc, tid);

	/*
	 * send bar if we dropped any frames
	 *
	 * Keep the txq lock held for now, as we need to ensure
	 * that ni_txseqs[] is consistent (as it's being updated
	 * in the ifnet TX context or raw TX context.)
	 */
	if (drops) {
		/* Suspend the TX queue and get ready to send the BAR */
		ath_tx_tid_bar_suspend(sc, tid);
	}

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, tid))
		ath_tx_tid_bar_tx(sc, tid);

	ATH_TX_UNLOCK(sc);

	/* Complete frames which errored out */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}

/*
 * Handle clean-up of packets from an aggregate list.
 *
 * There's no need to update the BAW here - the session is being
 * torn down.
 */
static void
ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_next;
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf_first->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];

	ATH_TX_LOCK(sc);

	/* update incomp - one decrement per subframe in the aggregate */
	bf = bf_first;
	while (bf) {
		atid->incomp--;
		bf = bf->bf_next;
	}

	if (atid->incomp == 0) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
		    "%s: TID %d: cleaned up! resume!\n",
		    __func__, tid);
		atid->cleanup_inprogress = 0;
		ath_tx_tid_resume(sc, atid);
	}

	/* Send BAR if required */
	/* XXX why would we send a BAR when transitioning to non-aggregation? */
	/*
	 * XXX TODO: we should likely just tear down the BAR state here,
	 * rather than sending a BAR.
	 */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);

	/* Handle frame completion - fail=1, session is being torn down */
	bf = bf_first;
	while (bf) {
		bf_next = bf->bf_next;
		ath_tx_default_comp(sc, bf, 1);
		bf = bf_next;
	}
}

/*
 * Handle completion of a set of aggregate frames.
 *
 * Note: the completion handler is the last descriptor in the aggregate,
 * not the last descriptor in the first frame.
 */
static void
ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
    int fail)
{
	//struct ath_desc *ds = bf->bf_lastds;
	struct ieee80211_node *ni = bf_first->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf_first->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status ts;
	struct ieee80211_tx_ampdu *tap;
	ath_bufhead bf_q;	/* frames to be software-retried */
	ath_bufhead bf_cq;	/* frames to be completed (freed) */
	int seq_st, tx_ok;
	int hasba, isaggr;
	uint32_t ba[2];
	struct ath_buf *bf, *bf_next;
	int ba_index;
	int drops = 0;
	int nframes = 0, nbad = 0, nf;
	int pktlen;
	/* XXX there's too much on the stack? */
	struct ath_rc_series rc[ATH_RC_NUM];
	int txseq;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
	    __func__, atid->hwq_depth);

	/*
	 * Take a copy; this may be needed -after- bf_first
	 * has been completed and freed.
	 */
	ts = bf_first->bf_status.ds_txstat;

	TAILQ_INIT(&bf_q);
	TAILQ_INIT(&bf_cq);

	/* The TID state is kept behind the TXQ lock */
	ATH_TX_LOCK(sc);

	atid->hwq_depth--;
	if (atid->hwq_depth < 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);

	/*
	 * If the TID is filtered, handle completing the filter
	 * transition before potentially kicking it to the cleanup
	 * function.
	 *
	 * XXX this is duplicate work, ew.
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * Punt cleanup to the relevant function, not our problem now
	 */
	if (atid->cleanup_inprogress) {
		if (atid->isfiltered)
			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
			    "%s: isfiltered=1, normal_comp?\n",
			    __func__);
		ATH_TX_UNLOCK(sc);
		ath_tx_comp_cleanup_aggr(sc, bf_first);
		return;
	}

	/*
	 * If the frame is filtered, transition to filtered frame
	 * mode and add this to the filtered frame list.
	 *
	 * XXX TODO: figure out how this interoperates with
	 * BAR, pause and cleanup states.
	 */
	if ((ts.ts_status & HAL_TXERR_FILT) ||
	    (ts.ts_status != 0 && atid->isfiltered)) {
		if (fail != 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);

		/* Remove from BAW */
		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
			if (bf->bf_state.bfs_addedbaw)
				drops++;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (!bf->bf_state.bfs_addedbaw)
					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
					    "%s: wasn't added: seqno %d\n",
					    __func__,
					    SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
		}
		/*
		 * If any intermediate frames in the BAW were dropped when
		 * handling filtering things, send a BAR.
		 */
		if (drops)
			ath_tx_tid_bar_suspend(sc, atid);

		/*
		 * Finish up by sending a BAR if required and freeing
		 * the frames outside of the TX lock.
		 */
		goto finish_send_bar;
	}

	/*
	 * XXX for now, use the first frame in the aggregate for
	 * XXX rate control completion; it's at least consistent.
	 */
	pktlen = bf_first->bf_state.bfs_pktlen;

	/*
	 * Handle errors first!
	 *
	 * Here, handle _any_ error as a "exceeded retries" error.
	 * Later on (when filtered frames are to be specially handled)
	 * it'll have to be expanded.
	 */
#if 0
	if (ts.ts_status & HAL_TXERR_XRETRY) {
#endif
	if (ts.ts_status != 0) {
		ATH_TX_UNLOCK(sc);
		ath_tx_comp_aggr_error(sc, bf_first, atid);
		return;
	}

	tap = ath_tx_get_tx_tid(an, tid);

	/*
	 * extract starting sequence and block-ack bitmap
	 */
	/* XXX endian-ness of seq_st, ba? */
	seq_st = ts.ts_seqnum;
	hasba = !! (ts.ts_flags & HAL_TX_BA);
	tx_ok = (ts.ts_status == 0);
	isaggr = bf_first->bf_state.bfs_aggr;
	ba[0] = ts.ts_ba_low;
	ba[1] = ts.ts_ba_high;

	/*
	 * Copy the TX completion status and the rate control
	 * series from the first descriptor, as it may be freed
	 * before the rate control code can get its grubby fingers
	 * into things.
	 */
	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
	    isaggr, seq_st, hasba, ba[0], ba[1]);

	/*
	 * The reference driver doesn't do this; it simply ignores
	 * this check in its entirety.
	 *
	 * I've seen this occur when using iperf to send traffic
	 * out tid 1 - the aggregate frames are all marked as TID 1,
	 * but the TXSTATUS has TID=0. So, let's just ignore this
	 * check.
	 */
#if 0
	/* Occasionally, the MAC sends a tx status for the wrong TID. */
	if (tid != ts.ts_tid) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
		    __func__, tid, ts.ts_tid);
		tx_ok = 0;
	}
#endif

	/* AR5416 BA bug; this requires an interface reset */
	if (isaggr && tx_ok && (! hasba)) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
		    "seq_st=%d\n",
		    __func__, hasba, tx_ok, isaggr, seq_st);
		/* XXX TODO: schedule an interface reset */
#ifdef ATH_DEBUG
		ath_printtxbuf(sc, bf_first,
		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
#endif
	}

	/*
	 * Walk the list of frames, figure out which ones were correctly
	 * sent and which weren't.
	 */
	bf = bf_first;
	nf = bf_first->bf_state.bfs_nframes;

	/* bf_first is going to be invalid once this list is walked */
	bf_first = NULL;

	/*
	 * Walk the list of completed frames and determine
	 * which need to be completed and which need to be
	 * retransmitted.
	 *
	 * For completed frames, the completion functions need
	 * to be called at the end of this function as the last
	 * node reference may free the node.
	 *
	 * Finally, since the TXQ lock can't be held during the
	 * completion callback (to avoid lock recursion),
	 * the completion calls have to be done outside of the
	 * lock.
	 */
	while (bf) {
		nframes++;
		ba_index = ATH_BA_INDEX(seq_st,
		    SEQNO(bf->bf_state.bfs_seqno));
		bf_next = bf->bf_next;
		bf->bf_next = NULL;	/* Remove it from the aggr list */

		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: checking bf=%p seqno=%d; ack=%d\n",
		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
		    ATH_BA_ISSET(ba, ba_index));

		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
			/* Block-acked: complete it and slide the BAW */
			sc->sc_stats.ast_tx_aggr_ok++;
			ath_tx_update_baw(sc, an, atid, bf);
			bf->bf_state.bfs_dobaw = 0;
			if (!bf->bf_state.bfs_addedbaw)
				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
				    "%s: wasn't added: seqno %d\n",
				    __func__, SEQNO(bf->bf_state.bfs_seqno));
			bf->bf_next = NULL;
			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
		} else {
			/* Not acked: software-retry or drop it */
			sc->sc_stats.ast_tx_aggr_fail++;
			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
				drops++;
				bf->bf_next = NULL;
				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
			}
			nbad++;
		}
		bf = bf_next;
	}

	/*
	 * Now that the BAW updates have been done, unlock
	 *
	 * txseq is grabbed before the lock is released so we
	 * have a consistent view of what -was- in the BAW.
	 * Anything after this point will not yet have been
	 * TXed.
	 */
	txseq = tap->txa_start;
	ATH_TX_UNLOCK(sc);

	if (nframes != nf)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: num frames seen=%d; bf nframes=%d\n",
		    __func__, nframes, nf);

	/*
	 * Now we know how many frames were bad, call the rate
	 * control code.
	 */
	if (fail == 0)
		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
		    nbad);

	/*
	 * send bar if we dropped any frames
	 */
	if (drops) {
		/* Suspend the TX queue and get ready to send the BAR */
		ATH_TX_LOCK(sc);
		ath_tx_tid_bar_suspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
	    "%s: txa_start now %d\n", __func__, tap->txa_start);

	ATH_TX_LOCK(sc);

	/* Prepend all frames to the beginning of the queue */
	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
		TAILQ_REMOVE(&bf_q, bf, bf_list);
		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
	}

	/*
	 * Reschedule to grab some further frames.
	 */
	ath_tx_tid_sched(sc, atid);

	/*
	 * If the queue is filtered, re-schedule as required.
	 *
	 * This is required as there may be a subsequent TX descriptor
	 * for this end-node that has CLRDMASK set, so it's quite possible
	 * that a filtered frame will be followed by a non-filtered
	 * (complete or otherwise) frame.
	 *
	 * XXX should we do this before we complete the frame?
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

finish_send_bar:

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);

	/* Do deferred completion */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
}

/*
 * Handle completion of unaggregated frames in an ADDBA
 * session.
 *
 * Fail is set to 1 if the entry is being freed via a call to
 * ath_tx_draintxq().
 */
static void
ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	struct ieee80211_node *ni = bf->bf_node;
	struct ath_node *an = ATH_NODE(ni);
	int tid = bf->bf_state.bfs_tid;
	struct ath_tid *atid = &an->an_tid[tid];
	struct ath_tx_status ts;
	int drops = 0;

	/*
	 * Take a copy of this; filtering/cloning the frame may free the
	 * bf pointer.
	 */
	ts = bf->bf_status.ds_txstat;

	/*
	 * Update rate control status here, before we possibly
	 * punt to retry or cleanup.
	 *
	 * Do it outside of the TXQ lock.
	 */
	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
		    &bf->bf_status.ds_txstat,
		    bf->bf_state.bfs_pktlen,
		    1, (ts.ts_status == 0) ? 0 : 1);

	/*
	 * This is called early so atid->hwq_depth can be tracked.
	 * This unfortunately means that it's released and regrabbed
	 * during retry and cleanup. That's rather inefficient.
	 */
	ATH_TX_LOCK(sc);

	if (tid == IEEE80211_NONQOS_TID)
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);

	DPRINTF(sc, ATH_DEBUG_SW_TX,
	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
	    SEQNO(bf->bf_state.bfs_seqno));

	atid->hwq_depth--;
	if (atid->hwq_depth < 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
		    __func__, atid->hwq_depth);

	/*
	 * If the TID is filtered, handle completing the filter
	 * transition before potentially kicking it to the cleanup
	 * function.
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * If a cleanup is in progress, punt to comp_cleanup;
	 * rather than handling it here. It's thus their
	 * responsibility to clean up, call the completion
	 * function in net80211, etc.
	 */
	if (atid->cleanup_inprogress) {
		if (atid->isfiltered)
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: isfiltered=1, normal_comp?\n",
			    __func__);
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
		    __func__);
		ath_tx_comp_cleanup_unaggr(sc, bf);
		return;
	}

	/*
	 * XXX TODO: how does cleanup, BAR and filtered frame handling
	 * overlap?
	 *
	 * If the frame is filtered OR if it's any failure but
	 * the TID is filtered, the frame must be added to the
	 * filtered frame list.
	 *
	 * However - a busy buffer can't be added to the filtered
	 * list as it will end up being recycled without having
	 * been made available for the hardware.
	 */
	if ((ts.ts_status & HAL_TXERR_FILT) ||
	    (ts.ts_status != 0 && atid->isfiltered)) {
		int freeframe;

		if (fail != 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: isfiltered=1, fail=%d\n",
			    __func__, fail);
		/* freeframe != 0: frame couldn't be cloned/filtered */
		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
		if (freeframe) {
			/* Remove from BAW */
			if (bf->bf_state.bfs_addedbaw)
				drops++;
			if (bf->bf_state.bfs_dobaw) {
				ath_tx_update_baw(sc, an, atid, bf);
				if (!bf->bf_state.bfs_addedbaw)
					DPRINTF(sc, ATH_DEBUG_SW_TX,
					    "%s: wasn't added: seqno %d\n",
					    __func__, SEQNO(bf->bf_state.bfs_seqno));
			}
			bf->bf_state.bfs_dobaw = 0;
		}

		/*
		 * If the frame couldn't be filtered, treat it as a drop and
		 * prepare to send a BAR.
		 */
		if (freeframe && drops)
			ath_tx_tid_bar_suspend(sc, atid);

		/*
		 * Send BAR if required
		 */
		if (ath_tx_tid_bar_tx_ready(sc, atid))
			ath_tx_tid_bar_tx(sc, atid);

		ATH_TX_UNLOCK(sc);
		/*
		 * If freeframe is set, then the frame couldn't be
		 * cloned and bf is still valid. Just complete/free it.
		 */
		if (freeframe)
			ath_tx_default_comp(sc, bf, fail);


		return;
	}
	/*
	 * Don't bother with the retry check if all frames
	 * are being failed (eg during queue deletion.)
	 */
#if 0
	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
#endif
	if (fail == 0 && ts.ts_status != 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
		    __func__);
		ath_tx_aggr_retry_unaggr(sc, bf);
		return;
	}

	/* Success? Complete */
	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
	if (bf->bf_state.bfs_dobaw) {
		ath_tx_update_baw(sc, an, atid, bf);
		bf->bf_state.bfs_dobaw = 0;
		if (!bf->bf_state.bfs_addedbaw)
			DPRINTF(sc, ATH_DEBUG_SW_TX,
			    "%s: wasn't added: seqno %d\n",
			    __func__, SEQNO(bf->bf_state.bfs_seqno));
	}

	/*
	 * If the queue is filtered, re-schedule as required.
	 *
	 * This is required as there may be a subsequent TX descriptor
	 * for this end-node that has CLRDMASK set, so it's quite possible
	 * that a filtered frame will be followed by a non-filtered
	 * (complete or otherwise) frame.
	 *
	 * XXX should we do this before we complete the frame?
	 */
	if (atid->isfiltered)
		ath_tx_tid_filt_comp_complete(sc, atid);

	/*
	 * Send BAR if required
	 */
	if (ath_tx_tid_bar_tx_ready(sc, atid))
		ath_tx_tid_bar_tx(sc, atid);

	ATH_TX_UNLOCK(sc);

	ath_tx_default_comp(sc, bf, fail);
	/* bf is freed at this point */
}

/*
 * Completion handler for frames sent via the aggregation-capable
 * path; dispatch to the aggregate or non-aggregate handler based
 * on how the frame was queued.
 */
void
ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
	if (bf->bf_state.bfs_aggr)
		ath_tx_aggr_comp_aggr(sc, bf, fail);
	else
		ath_tx_aggr_comp_unaggr(sc, bf, fail);
}

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * This is the aggregate version.
 */
void
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
	struct ath_buf *bf;
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
	struct ieee80211_tx_ampdu *tap;
	ATH_AGGR_STATUS status;
	ath_bufhead bf_q;

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * XXX TODO: If we're called for a queue that we're leaking frames to,
	 * ensure we only leak one.
	 */

	tap = ath_tx_get_tx_tid(an, tid->tid);

	if (tid->tid == IEEE80211_NONQOS_TID)
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: called for TID=NONQOS_TID?\n", __func__);

	for (;;) {
		status = ATH_AGGR_DONE;

		/*
		 * If the upper layer has paused the TID, don't
		 * queue any further packets.
		 *
		 * This can also occur from the completion task because
		 * of packet loss; but as it's serialised with this code,
		 * it won't "appear" half way through queuing packets.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
			break;

		bf = ATH_TID_FIRST(tid);
		if (bf == NULL) {
			break;
		}

		/*
		 * If the packet doesn't fall within the BAW (eg a NULL
		 * data frame), schedule it directly; continue.
		 */
		if (! bf->bf_state.bfs_dobaw) {
			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
			    "%s: non-baw packet\n",
			    __func__);
			ATH_TID_REMOVE(tid, bf, bf_list);

			if (bf->bf_state.bfs_nframes > 1)
				DPRINTF(sc, ATH_DEBUG_SW_TX,
				    "%s: aggr=%d, nframes=%d\n",
				    __func__,
				    bf->bf_state.bfs_aggr,
				    bf->bf_state.bfs_nframes);

			/*
			 * This shouldn't happen - such frames shouldn't
			 * ever have been queued as an aggregate in the
			 * first place. However, make sure the fields
			 * are correctly setup just to be totally sure.
			 */
			bf->bf_state.bfs_aggr = 0;
			bf->bf_state.bfs_nframes = 1;

			/* Update CLRDMASK just before this frame is queued */
			ath_tx_update_clrdmask(sc, tid, bf);

			ath_tx_do_ratelookup(sc, bf);
			ath_tx_calc_duration(sc, bf);
			ath_tx_calc_protection(sc, bf);
			ath_tx_set_rtscts(sc, bf);
			ath_tx_rate_fill_rcflags(sc, bf);
			ath_tx_setds(sc, bf);
			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);

			sc->sc_aggr_stats.aggr_nonbaw_pkt++;

			/* Queue the packet; continue */
			goto queuepkt;
		}

		TAILQ_INIT(&bf_q);

		/*
		 * Do a rate control lookup on the first frame in the
		 * list. The rate control code needs that to occur
		 * before it can determine whether to TX.
		 * It's inaccurate because the rate control code doesn't
		 * really "do" aggregate lookups, so it only considers
		 * the size of the first frame.
		 */
		ath_tx_do_ratelookup(sc, bf);
		bf->bf_state.bfs_rc[3].rix = 0;
		bf->bf_state.bfs_rc[3].tries = 0;

		ath_tx_calc_duration(sc, bf);
		ath_tx_calc_protection(sc, bf);

		ath_tx_set_rtscts(sc, bf);
		ath_tx_rate_fill_rcflags(sc, bf);

		status = ath_tx_form_aggr(sc, an, tid, &bf_q);

		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);

		/*
		 * No frames to be picked up - out of BAW
		 */
		if (TAILQ_EMPTY(&bf_q))
			break;

		/*
		 * This assumes that the descriptor list in the ath_bufhead
		 * are already linked together via bf_next pointers.
		 */
		bf = TAILQ_FIRST(&bf_q);

		if (status == ATH_AGGR_8K_LIMITED)
			sc->sc_aggr_stats.aggr_rts_aggr_limited++;

		/*
		 * If it's the only frame send as non-aggregate
		 * assume that ath_tx_form_aggr() has checked
		 * whether it's in the BAW and added it appropriately.
		 */
		if (bf->bf_state.bfs_nframes == 1) {
			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
			    "%s: single-frame aggregate\n", __func__);

			/* Update CLRDMASK just before this frame is queued */
			ath_tx_update_clrdmask(sc, tid, bf);

			bf->bf_state.bfs_aggr = 0;
			bf->bf_state.bfs_ndelim = 0;
			ath_tx_setds(sc, bf);
			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			if (status == ATH_AGGR_BAW_CLOSED)
				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
			else
				sc->sc_aggr_stats.aggr_single_pkt++;
		} else {
			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
			    "%s: multi-frame aggregate: %d frames, "
			    "length %d\n",
			    __func__, bf->bf_state.bfs_nframes,
			    bf->bf_state.bfs_al);
			bf->bf_state.bfs_aggr = 1;
			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
			sc->sc_aggr_stats.aggr_aggr_pkt++;

			/* Update CLRDMASK just before this frame is queued */
			ath_tx_update_clrdmask(sc, tid, bf);

			/*
			 * Calculate the duration/protection as required.
			 */
			ath_tx_calc_duration(sc, bf);
			ath_tx_calc_protection(sc, bf);

			/*
			 * Update the rate and rtscts information based on the
			 * rate decision made by the rate control code;
			 * the first frame in the aggregate needs it.
			 */
			ath_tx_set_rtscts(sc, bf);

			/*
			 * Setup the relevant descriptor fields
			 * for aggregation. The first descriptor
			 * already points to the rest in the chain.
			 */
			ath_tx_setds_11n(sc, bf);

		}
	queuepkt:
		/* Set completion handler, multi-frame aggregate or not */
		bf->bf_comp = ath_tx_aggr_comp;

		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);

		/*
		 * Update leak count and frame config if we're leaking frames.
		 *
		 * XXX TODO: it should update all frames in an aggregate
		 * correctly!
		 */
		ath_tx_leak_count_update(sc, tid, bf);

		/* Punt to txq */
		ath_tx_handoff(sc, txq, bf);

		/* Track outstanding buffer count to hardware */
		/* aggregates are "one" buffer */
		tid->hwq_depth++;

		/*
		 * Break out if ath_tx_form_aggr() indicated
		 * there can't be any further progress (eg BAW is full.)
		 * Checking for an empty txq is done above.
		 *
		 * XXX locking on txq here?
		 */
		/* XXX TXQ locking */
		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
		    (status == ATH_AGGR_BAW_CLOSED ||
		     status == ATH_AGGR_LEAK_CLOSED))
			break;
	}
}

/*
 * Schedule some packets from the given node/TID to the hardware.
 *
 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
 * It just dumps frames into the TXQ.
We should limit how deep
 * the transmit queue can grow for frames dispatched to the given
 * TXQ.
 *
 * To avoid locking issues, either we need to own the TXQ lock
 * at this point, or we need to pass in the maximum frame count
 * from the caller.
 *
 * Requires the TX lock to be held (asserted below).
 */
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid)
{
	struct ath_buf *bf;
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
	    __func__, an, tid->tid);

	ATH_TX_LOCK_ASSERT(sc);

	/* Check - is AMPDU pending or running? then print out something */
	if (ath_tx_ampdu_pending(sc, an, tid->tid))
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
		    __func__, tid->tid);
	if (ath_tx_ampdu_running(sc, an, tid->tid))
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
		    __func__, tid->tid);

	/* Dequeue frames from the software queue until we can't any more */
	for (;;) {

		/*
		 * If the upper layers have paused the TID, don't
		 * queue any further packets.
		 *
		 * XXX if we are leaking frames, make sure we decrement
		 * that counter _and_ we continue here.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
			break;

		bf = ATH_TID_FIRST(tid);
		if (bf == NULL) {
			break;
		}

		ATH_TID_REMOVE(tid, bf, bf_list);

		/* Sanity check! */
		if (tid->tid != bf->bf_state.bfs_tid) {
			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
			    tid->tid);
		}
		/* Normal completion handler */
		bf->bf_comp = ath_tx_normal_comp;

		/*
		 * Override this for now, until the non-aggregate
		 * completion handler correctly handles software retransmits.
		 */
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

		/* Update CLRDMASK just before this frame is queued */
		ath_tx_update_clrdmask(sc, tid, bf);

		/* Program descriptors + rate control */
		ath_tx_do_ratelookup(sc, bf);
		ath_tx_calc_duration(sc, bf);
		ath_tx_calc_protection(sc, bf);
		ath_tx_set_rtscts(sc, bf);
		ath_tx_rate_fill_rcflags(sc, bf);
		ath_tx_setds(sc, bf);

		/*
		 * Update the current leak count if
		 * we're leaking frames; and set the
		 * MORE flag as appropriate.
		 */
		ath_tx_leak_count_update(sc, tid, bf);

		/* Track outstanding buffer count to hardware */
		/* aggregates are "one" buffer */
		tid->hwq_depth++;

		/* Punt to hardware or software txq */
		ath_tx_handoff(sc, txq, bf);
	}
}

/*
 * Schedule some packets to the given hardware queue.
 *
 * This function walks the list of TIDs (ie, ath_node TIDs
 * with queued traffic) and attempts to schedule traffic
 * from them.
 *
 * TID scheduling is implemented as a FIFO, with TIDs being
 * added to the end of the queue after some frames have been
 * scheduled.
 *
 * Requires the TX lock to be held (asserted below).
 */
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_tid *tid, *next, *last;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Don't schedule if the hardware queue is busy.
	 * This (hopefully) gives some more time to aggregate
	 * some packets in the aggregation queue.
	 *
	 * XXX It doesn't stop a parallel sender from sneaking
	 * in transmitting a frame!
	 */
	/* XXX TXQ locking */
	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}
	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
		sc->sc_aggr_stats.aggr_sched_nopkt++;
		return;
	}

	/* Remember the current tail so rescheduled TIDs don't loop forever */
	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);

	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
		/*
		 * Suspend paused queues here; they'll be resumed
		 * once the addba completes or times out.
		 */
		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
		    __func__, tid->tid, tid->paused);
		ath_tx_tid_unsched(sc, tid);
		/*
		 * This node may be in power-save and we're leaking
		 * a frame; be careful.
		 */
		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
			continue;
		}
		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
		else
			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);

		/* Not empty? Re-schedule */
		if (tid->axq_depth != 0)
			ath_tx_tid_sched(sc, tid);

		/*
		 * Give the software queue time to aggregate more
		 * packets.  If we aren't running aggregation then
		 * we should still limit the hardware queue depth.
		 */
		/* XXX TXQ locking */
		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
			break;
		}
		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
			break;
		}

		/*
		 * If this was the last entry on the original list, stop.
		 * Otherwise nodes that have been rescheduled onto the end
		 * of the TID FIFO list will just keep being rescheduled.
		 *
		 * XXX What should we do about nodes that were paused
		 * but are pending a leaking frame in response to a ps-poll?
		 * They'll be put at the front of the list; so they'll
		 * prematurely trigger this condition! Ew.
		 */
		if (tid == last)
			break;
	}
}

/*
 * TX addba handling
 */

/*
 * Return net80211 TID struct pointer, or NULL for none
 */
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_tx_ampdu *tap;

	/* The non-QoS TID has no net80211 aggregation state */
	if (tid == IEEE80211_NONQOS_TID)
		return NULL;

	tap = &ni->ni_tx_ampdu[tid];
	return tap;
}

/*
 * Is AMPDU-TX running?
5559227364Sadrian */ 5560227364Sadrianstatic int 5561227364Sadrianath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) 5562227364Sadrian{ 5563227364Sadrian struct ieee80211_tx_ampdu *tap; 5564227364Sadrian 5565227364Sadrian if (tid == IEEE80211_NONQOS_TID) 5566227364Sadrian return 0; 5567227364Sadrian 5568227364Sadrian tap = ath_tx_get_tx_tid(an, tid); 5569227364Sadrian if (tap == NULL) 5570227364Sadrian return 0; /* Not valid; default to not running */ 5571227364Sadrian 5572227364Sadrian return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); 5573227364Sadrian} 5574227364Sadrian 5575227364Sadrian/* 5576227364Sadrian * Is AMPDU-TX negotiation pending? 5577227364Sadrian */ 5578227364Sadrianstatic int 5579227364Sadrianath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) 5580227364Sadrian{ 5581227364Sadrian struct ieee80211_tx_ampdu *tap; 5582227364Sadrian 5583227364Sadrian if (tid == IEEE80211_NONQOS_TID) 5584227364Sadrian return 0; 5585227364Sadrian 5586227364Sadrian tap = ath_tx_get_tx_tid(an, tid); 5587227364Sadrian if (tap == NULL) 5588227364Sadrian return 0; /* Not valid; default to not pending */ 5589227364Sadrian 5590227364Sadrian return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); 5591227364Sadrian} 5592227364Sadrian 5593227364Sadrian/* 5594227364Sadrian * Is AMPDU-TX pending for the given TID? 5595227364Sadrian */ 5596227364Sadrian 5597227364Sadrian 5598227364Sadrian/* 5599227364Sadrian * Method to handle sending an ADDBA request. 5600227364Sadrian * 5601227364Sadrian * We tap this so the relevant flags can be set to pause the TID 5602227364Sadrian * whilst waiting for the response. 5603227364Sadrian * 5604227364Sadrian * XXX there's no timeout handler we can override? 
 *
 * Returns whatever the underlying net80211 addba request handler returns.
 */
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	/*
	 * XXX danger Will Robinson!
	 *
	 * Although the taskqueue may be running and scheduling some more
	 * packets, these should all be _before_ the addba sequence number.
	 * However, net80211 will keep self-assigning sequence numbers
	 * until addba has been negotiated.
	 *
	 * In the past, these packets would be "paused" (which still works
	 * fine, as they're being scheduled to the driver in the same
	 * serialised method which is calling the addba request routine)
	 * and when the aggregation session begins, they'll be dequeued
	 * as aggregate packets and added to the BAW. However, now there's
	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
	 * packets. Thus they never get included in the BAW tracking and
	 * this can cause the initial burst of packets after the addba
	 * negotiation to "hang", as they quickly fall outside the BAW.
	 *
	 * The "eventual" solution should be to tag these packets with
	 * dobaw. Although net80211 has given us a sequence number,
	 * it'll be "after" the left edge of the BAW and thus it'll
	 * fall within it.
	 */
	ATH_TX_LOCK(sc);
	/*
	 * This is a bit annoying.  Until net80211 HT code inherits some
	 * (any) locking, we may have this called in parallel BUT only
	 * one response/timeout will be called.  Grr.
	 *
	 * Only pause the TID once; parallel callers must not pause twice.
	 */
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TX_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    dialogtoken, baparamset, batimeout);
	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/* Hand off to the original (net80211) addba request handler */
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * Handle an ADDBA response.
 *
 * We unpause the queue so TX'ing can resume.
 *
 * Any packets TX'ed from this point should be "aggregate" (whether
 * aggregate or not) so the BAW is updated.
 *
 * Note! net80211 keeps self-assigning sequence numbers until
 * ampdu is negotiated. This means the initially-negotiated BAW left
 * edge won't match the ni->ni_txseq.
 *
 * So, being very dirty, the BAW left edge is "slid" here to match
 * ni->ni_txseq.
 *
 * What likely SHOULD happen is that all packets subsequent to the
 * addba request should be tagged as aggregate and queued as non-aggregate
 * frames; thus updating the BAW. For now though, I'll just slide the
 * window.
 *
 * Returns whatever the underlying net80211 addba response handler returns.
 */
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status, int code, int batimeout)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int r;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
	    ni->ni_macaddr,
	    ":",
	    status, code, batimeout);

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: txa_start=%d, ni_txseqs=%d\n",
	    __func__, tap->txa_start, ni->ni_txseqs[tid]);

	/*
	 * Call this first, so the interface flags get updated
	 * before the TID is unpaused. Otherwise a race condition
	 * exists where the unpaused TID still doesn't yet have
	 * IEEE80211_AGGR_RUNNING set.
	 */
	r = sc->sc_addba_response(ni, tap, status, code, batimeout);

	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	/*
	 * XXX dirty!
	 * Slide the BAW left edge to wherever net80211 left it for us.
	 * Read above for more information.
	 */
	tap->txa_start = ni->ni_txseqs[tid];
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
	return r;
}


/*
 * Stop ADDBA on a queue.
 *
 * This can be called whilst BAR TX is currently active on the queue,
 * so make sure this is unblocked before continuing.
 */
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
	    __func__,
	    ni->ni_macaddr,
	    ":");

	/*
	 * Pause TID traffic early, so there aren't any races
	 * Unblock the pending BAR held traffic, if it's currently paused.
	 */
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	if (atid->bar_wait) {
		/*
		 * bar_unsuspend() expects bar_tx == 1, as it should be
		 * called from the TX completion path. This quietens
		 * the warning. It's cleared for us anyway.
		 */
		atid->bar_tx = 1;
		ath_tx_tid_bar_unsuspend(sc, atid);
	}
	ATH_TX_UNLOCK(sc);

	/* There's no need to hold the TXQ lock here */
	sc->sc_addba_stop(ni, tap);

	/*
	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
	 * it'll set the cleanup flag, and it'll be unpaused once
	 * things have been cleaned up.
	 */
	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	/*
	 * Unpause the TID if no cleanup is required.
	 */
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);

	/* Handle completing frames and fail them */
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);
	}

}

/*
 * Handle a node reassociation.
 *
 * We may have a bunch of frames queued to the hardware; those need
 * to be marked as cleanup.
5795250608Sadrian */ 5796250608Sadrianvoid 5797250608Sadrianath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) 5798250608Sadrian{ 5799250608Sadrian struct ath_tid *tid; 5800250608Sadrian int i; 5801250608Sadrian ath_bufhead bf_cq; 5802250608Sadrian struct ath_buf *bf; 5803250608Sadrian 5804250608Sadrian TAILQ_INIT(&bf_cq); 5805250608Sadrian 5806250608Sadrian ATH_TX_UNLOCK_ASSERT(sc); 5807250608Sadrian 5808250608Sadrian ATH_TX_LOCK(sc); 5809250608Sadrian for (i = 0; i < IEEE80211_TID_SIZE; i++) { 5810250608Sadrian tid = &an->an_tid[i]; 5811250608Sadrian if (tid->hwq_depth == 0) 5812250608Sadrian continue; 5813250608Sadrian ath_tx_tid_pause(sc, tid); 5814250608Sadrian DPRINTF(sc, ATH_DEBUG_NODE, 5815250608Sadrian "%s: %6D: TID %d: cleaning up TID\n", 5816250608Sadrian __func__, 5817250608Sadrian an->an_node.ni_macaddr, 5818250608Sadrian ":", 5819250608Sadrian i); 5820250608Sadrian ath_tx_tid_cleanup(sc, an, i, &bf_cq); 5821251090Sadrian /* 5822251090Sadrian * Unpause the TID if no cleanup is required. 5823251090Sadrian */ 5824251090Sadrian if (! tid->cleanup_inprogress) 5825251090Sadrian ath_tx_tid_resume(sc, tid); 5826250608Sadrian } 5827250608Sadrian ATH_TX_UNLOCK(sc); 5828250608Sadrian 5829250608Sadrian /* Handle completing frames and fail them */ 5830250608Sadrian while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { 5831250608Sadrian TAILQ_REMOVE(&bf_cq, bf, bf_list); 5832250608Sadrian ath_tx_default_comp(sc, bf, 1); 5833250608Sadrian } 5834250608Sadrian} 5835250608Sadrian 5836250608Sadrian/* 5837227364Sadrian * Note: net80211 bar_timeout() doesn't call this function on BAR failure; 5838227364Sadrian * it simply tears down the aggregation session. Ew. 5839227364Sadrian * 5840227364Sadrian * It however will call ieee80211_ampdu_stop() which will call 5841227364Sadrian * ic->ic_addba_stop(). 
 *
 * XXX This uses a hard-coded max BAR count value; the whole
 * XXX BAR TX success or failure should be better handled!
 */
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int status)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];
	int attempts = tap->txa_attempts;

	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tap->txa_tid,
	    atid->tid,
	    status,
	    attempts);

	/* Note: This may update the BAW details */
	sc->sc_bar_response(ni, tap, status);

	/* Unpause the TID */
	/*
	 * XXX if this is attempt=50, the TID will be downgraded
	 * XXX to a non-aggregate session. So we must unpause the
	 * XXX TID here or it'll never be done.
	 *
	 * Also, don't call it if bar_tx/bar_wait are 0; something
	 * has beaten us to the punch? (XXX figure out what?)
	 */
	if (status == 0 || attempts == 50) {
		ATH_TX_LOCK(sc);
		if (atid->bar_tx == 0 || atid->bar_wait == 0)
			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
			    __func__,
			    atid->bar_tx, atid->bar_wait);
		else
			ath_tx_tid_bar_unsuspend(sc, atid);
		ATH_TX_UNLOCK(sc);
	}
}

/*
 * This is called whenever the pending ADDBA request times out.
 * Unpause and reschedule the TID.
 *
 * Must be called without the TX lock held; it is taken below.
 */
void
ath_addba_response_timeout(struct ieee80211_node *ni,
    struct ieee80211_tx_ampdu *tap)
{
	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int tid = tap->txa_tid;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_tid *atid = &an->an_tid[tid];

	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
	    "%s: %6D: TID=%d, called; resuming\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    tid);

	/* Clear the pending-ADDBA flag before notifying net80211 */
	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ATH_TX_UNLOCK(sc);

	/* Note: This updates the aggregate state to (again) pending */
	sc->sc_addba_response_timeout(ni, tap);

	/* Unpause the TID; which reschedules it */
	ATH_TX_LOCK(sc);
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}

/*
 * Check if a node is asleep or not.
 *
 * Returns non-zero if the node is marked as being in power-save.
 * The TX lock must be held (asserted below).
 */
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{

	ATH_TX_LOCK_ASSERT(sc);

	return (an->an_is_powersave);
}

/*
 * Mark a node as currently "in powersaving."
 * This suspends all traffic on the node.
 *
 * This must be called with the node/tx locks free.
 *
 * XXX TODO: the locking silliness below is due to how the node
 * locking currently works. Right now, the node lock is grabbed
 * to do rate control lookups and these are done with the TX
 * queue lock held. This means the node lock can't be grabbed
 * first here or a LOR will occur.
 *
 * Eventually (hopefully!) the TX path code will only grab
 * the TXQ lock when transmitting and the ath_node lock when
 * doing node/TID operations. There are other complications -
 * the sched/unsched operations involve walking the per-txq
 * 'active tid' list and this requires both locks to be held.
 */
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	/* Suspend all traffic on the node */
	ATH_TX_LOCK(sc);

	/* Already asleep? Then there's nothing to do */
	if (an->an_is_powersave) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: node was already asleep!\n",
		    __func__, an->an_node.ni_macaddr, ":");
		ATH_TX_UNLOCK(sc);
		return;
	}

	/* Pause every TID on this node */
	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_pause(sc, atid);
	}

	/* Mark node as in powersaving */
	an->an_is_powersave = 1;

	ATH_TX_UNLOCK(sc);
}

/*
 * Mark a node as currently "awake."
 * This resumes all traffic to the node.
 *
 * Must be called without the TX lock held; it is taken below.
 */
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_tid *atid;
	struct ath_txq *txq;
	int tid;

	ATH_TX_UNLOCK_ASSERT(sc);

	ATH_TX_LOCK(sc);

	/* Already awake? Then there's nothing to do (XXX how did we get here?) */
	if (an->an_is_powersave == 0) {
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: an=%p: node was already awake\n",
		    __func__, an);
		return;
	}

	/* Mark node as awake */
	an->an_is_powersave = 0;
	/*
	 * Clear any pending leaked frame requests
	 */
	an->an_leak_count = 0;

	/* Resume every TID on this node */
	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
		atid = &an->an_tid[tid];
		txq = sc->sc_ac2q[atid->ac];

		ath_tx_tid_resume(sc, atid);
	}
	ATH_TX_UNLOCK(sc);
}

/*
 * Legacy (pre-EDMA) DMA TX state setup: nothing extra is required.
 */
static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

/*
 * Legacy (pre-EDMA) DMA TX state teardown: nothing extra is required.
 */
static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{

	/* nothing new needed */
	return (0);
}

/*
 * Install the legacy (pre-EDMA) TX method pointers and descriptor
 * sizing into the softc.
 */
void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
	/*
	 * For now, just set the descriptor length to sizeof(ath_desc);
	 * worry about extracting the real length out of the HAL later.
	 */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */

	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;

	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}