/* if_ath.c revision 227351 */
1/*- 2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification. 11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 13 * redistribution must be conditioned upon including a substantially 14 * similar Disclaimer requirement for further binary redistribution. 15 * 16 * NO WARRANTY 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGES. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 227351 2011-11-08 18:45:15Z adrian $"); 32 33/* 34 * Driver for the Atheros Wireless LAN controller. 35 * 36 * This software is derived from work of Atsushi Onoe; his contribution 37 * is greatly appreciated. 38 */ 39 40#include "opt_inet.h" 41#include "opt_ath.h" 42/* 43 * This is needed for register operations which are performed 44 * by the driver - eg, calls to ath_hal_gettsf32(). 
45 */ 46#include "opt_ah.h" 47#include "opt_wlan.h" 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/sysctl.h> 52#include <sys/mbuf.h> 53#include <sys/malloc.h> 54#include <sys/lock.h> 55#include <sys/mutex.h> 56#include <sys/kernel.h> 57#include <sys/socket.h> 58#include <sys/sockio.h> 59#include <sys/errno.h> 60#include <sys/callout.h> 61#include <sys/bus.h> 62#include <sys/endian.h> 63#include <sys/kthread.h> 64#include <sys/taskqueue.h> 65#include <sys/priv.h> 66#include <sys/module.h> 67 68#include <machine/bus.h> 69 70#include <net/if.h> 71#include <net/if_dl.h> 72#include <net/if_media.h> 73#include <net/if_types.h> 74#include <net/if_arp.h> 75#include <net/ethernet.h> 76#include <net/if_llc.h> 77 78#include <net80211/ieee80211_var.h> 79#include <net80211/ieee80211_regdomain.h> 80#ifdef IEEE80211_SUPPORT_SUPERG 81#include <net80211/ieee80211_superg.h> 82#endif 83#ifdef IEEE80211_SUPPORT_TDMA 84#include <net80211/ieee80211_tdma.h> 85#endif 86 87#include <net/bpf.h> 88 89#ifdef INET 90#include <netinet/in.h> 91#include <netinet/if_ether.h> 92#endif 93 94#include <dev/ath/if_athvar.h> 95#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */ 96#include <dev/ath/ath_hal/ah_diagcodes.h> 97 98#include <dev/ath/if_ath_debug.h> 99#include <dev/ath/if_ath_misc.h> 100#include <dev/ath/if_ath_tx.h> 101#include <dev/ath/if_ath_sysctl.h> 102#include <dev/ath/if_ath_keycache.h> 103#include <dev/ath/if_athdfs.h> 104 105#ifdef ATH_TX99_DIAG 106#include <dev/ath/ath_tx99/ath_tx99.h> 107#endif 108 109 110/* 111 * ATH_BCBUF determines the number of vap's that can transmit 112 * beacons and also (currently) the number of vap's that can 113 * have unique mac addresses/bssid. When staggering beacons 114 * 4 is probably a good max as otherwise the beacons become 115 * very closely spaced and there is limited time for cab q traffic 116 * to go out. 
You can burst beacons instead but that is not good 117 * for stations in power save and at some point you really want 118 * another radio (and channel). 119 * 120 * The limit on the number of mac addresses is tied to our use of 121 * the U/L bit and tracking addresses in a byte; it would be 122 * worthwhile to allow more for applications like proxy sta. 123 */ 124CTASSERT(ATH_BCBUF <= 8); 125 126static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 127 const char name[IFNAMSIZ], int unit, int opmode, 128 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 129 const uint8_t mac[IEEE80211_ADDR_LEN]); 130static void ath_vap_delete(struct ieee80211vap *); 131static void ath_init(void *); 132static void ath_stop_locked(struct ifnet *); 133static void ath_stop(struct ifnet *); 134static void ath_start(struct ifnet *); 135static int ath_reset_vap(struct ieee80211vap *, u_long); 136static int ath_media_change(struct ifnet *); 137static void ath_watchdog(void *); 138static int ath_ioctl(struct ifnet *, u_long, caddr_t); 139static void ath_fatal_proc(void *, int); 140static void ath_bmiss_vap(struct ieee80211vap *); 141static void ath_bmiss_proc(void *, int); 142static void ath_key_update_begin(struct ieee80211vap *); 143static void ath_key_update_end(struct ieee80211vap *); 144static void ath_update_mcast(struct ifnet *); 145static void ath_update_promisc(struct ifnet *); 146static void ath_mode_init(struct ath_softc *); 147static void ath_setslottime(struct ath_softc *); 148static void ath_updateslot(struct ifnet *); 149static int ath_beaconq_setup(struct ath_hal *); 150static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); 151static void ath_beacon_update(struct ieee80211vap *, int item); 152static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); 153static void ath_beacon_proc(void *, int); 154static struct ath_buf *ath_beacon_generate(struct ath_softc *, 155 struct ieee80211vap *); 156static void ath_bstuck_proc(void *, int); 
157static void ath_beacon_return(struct ath_softc *, struct ath_buf *); 158static void ath_beacon_free(struct ath_softc *); 159static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); 160static void ath_descdma_cleanup(struct ath_softc *sc, 161 struct ath_descdma *, ath_bufhead *); 162static int ath_desc_alloc(struct ath_softc *); 163static void ath_desc_free(struct ath_softc *); 164static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 165 const uint8_t [IEEE80211_ADDR_LEN]); 166static void ath_node_free(struct ieee80211_node *); 167static void ath_node_getsignal(const struct ieee80211_node *, 168 int8_t *, int8_t *); 169static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); 170static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 171 int subtype, int rssi, int nf); 172static void ath_setdefantenna(struct ath_softc *, u_int); 173static void ath_rx_proc(struct ath_softc *sc, int); 174static void ath_rx_tasklet(void *, int); 175static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 176static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 177static int ath_tx_setup(struct ath_softc *, int, int); 178static int ath_wme_update(struct ieee80211com *); 179static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 180static void ath_tx_cleanup(struct ath_softc *); 181static void ath_tx_proc_q0(void *, int); 182static void ath_tx_proc_q0123(void *, int); 183static void ath_tx_proc(void *, int); 184static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *); 185static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 186static void ath_draintxq(struct ath_softc *); 187static void ath_stoprecv(struct ath_softc *); 188static int ath_startrecv(struct ath_softc *); 189static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 190static void ath_scan_start(struct ieee80211com *); 191static void ath_scan_end(struct ieee80211com *); 
192static void ath_set_channel(struct ieee80211com *); 193static void ath_calibrate(void *); 194static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 195static void ath_setup_stationkey(struct ieee80211_node *); 196static void ath_newassoc(struct ieee80211_node *, int); 197static int ath_setregdomain(struct ieee80211com *, 198 struct ieee80211_regdomain *, int, 199 struct ieee80211_channel []); 200static void ath_getradiocaps(struct ieee80211com *, int, int *, 201 struct ieee80211_channel []); 202static int ath_getchannels(struct ath_softc *); 203static void ath_led_event(struct ath_softc *, int); 204 205static int ath_rate_setup(struct ath_softc *, u_int mode); 206static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 207 208static void ath_announce(struct ath_softc *); 209 210static void ath_dfs_tasklet(void *, int); 211 212#ifdef IEEE80211_SUPPORT_TDMA 213static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, 214 u_int32_t bintval); 215static void ath_tdma_bintvalsetup(struct ath_softc *sc, 216 const struct ieee80211_tdma_state *tdma); 217static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap); 218static void ath_tdma_update(struct ieee80211_node *ni, 219 const struct ieee80211_tdma_param *tdma, int); 220static void ath_tdma_beacon_send(struct ath_softc *sc, 221 struct ieee80211vap *vap); 222 223#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */ 224#define TDMA_LPF_LEN 6 225#define TDMA_DUMMY_MARKER 0x127 226#define TDMA_EP_MUL(x, mul) ((x) * (mul)) 227#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER)) 228#define TDMA_LPF(x, y, len) \ 229 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y)) 230#define TDMA_SAMPLE(x, y) do { \ 231 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \ 232} while (0) 233#define TDMA_EP_RND(x,mul) \ 234 ((((x)%(mul)) >= ((mul)/2)) ? 
	((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static	int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20	(HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
/*
 * Attach the driver to the device: allocate the ifnet, bring up the
 * HAL, reset/size the key cache, collect channels and rate tables,
 * allocate DMA descriptors and h/w tx queues, probe capabilities and
 * finally register with net80211.  Returns 0 on success or an errno;
 * on failure every partially-acquired resource is released via the
 * bad/bad2 labels and sc->sc_invalid is set.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	/* NB: calibration and watchdog callouts share the softc mutex */
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	/* Deferred (non-interrupt) processing runs from a private taskqueue */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev, "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable DFS radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break anything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			| IEEE80211_HTC_AMPDU		/* A-MPDU tx/rx */
			| IEEE80211_HTC_AMSDU		/* A-MSDU tx/rx */
			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
			| IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */
			;	/* XXX stray empty statement, harmless */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * rx/tx stream is not currently used anywhere; it needs to be taken
		 * into account when negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		device_printf(sc->sc_dev, "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

/*
 * Detach the driver from the device, releasing everything acquired
 * by ath_attach() in the reverse-dependency order documented below.
 * Always returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
796 */ 797static void 798assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) 799{ 800 int i; 801 802 if (clone && sc->sc_hasbmask) { 803 /* NB: we only do this if h/w supports multiple bssid */ 804 for (i = 0; i < 8; i++) 805 if ((sc->sc_bssidmask & (1<<i)) == 0) 806 break; 807 if (i != 0) 808 mac[0] |= (i << 2)|0x2; 809 } else 810 i = 0; 811 sc->sc_bssidmask |= 1<<i; 812 sc->sc_hwbssidmask[0] &= ~mac[0]; 813 if (i == 0) 814 sc->sc_nbssid0++; 815} 816 817static void 818reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) 819{ 820 int i = mac[0] >> 2; 821 uint8_t mask; 822 823 if (i != 0 || --sc->sc_nbssid0 == 0) { 824 sc->sc_bssidmask &= ~(1<<i); 825 /* recalculate bssid mask from remaining addresses */ 826 mask = 0xff; 827 for (i = 1; i < 8; i++) 828 if (sc->sc_bssidmask & (1<<i)) 829 mask &= ~((i<<2)|0x2); 830 sc->sc_hwbssidmask[0] |= mask; 831 } 832} 833 834/* 835 * Assign a beacon xmit slot. We try to space out 836 * assignments so when beacons are staggered the 837 * traffic coming out of the cab q has maximal time 838 * to go out before the next beacon is scheduled. 
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	/* Prefer a slot whose neighbours on both sides are also empty
	 * ("double slot"); otherwise remember the last single free slot.
	 * NOTE(review): (slot-1)%ATH_BCBUF on unsigned slot==0 wraps via
	 * UINT_MAX — this only lands on the last slot when ATH_BCBUF is a
	 * power of two; confirm against the ATH_BCBUF definition. */
	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic,
	const char name[IFNAMSIZ], int unit, int opmode, int flags,
	const uint8_t bssid[IEEE80211_ADDR_LEN],
	const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int ic_opmode, needbeacon, error;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons are implemented w/ AP mode.
892 */ 893 ic_opmode = IEEE80211_M_HOSTAP; 894 } 895 break; 896 case IEEE80211_M_IBSS: 897 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ 898 device_printf(sc->sc_dev, 899 "only 1 ibss vap supported\n"); 900 goto bad; 901 } 902 needbeacon = 1; 903 break; 904 case IEEE80211_M_AHDEMO: 905#ifdef IEEE80211_SUPPORT_TDMA 906 if (flags & IEEE80211_CLONE_TDMA) { 907 if (sc->sc_nvaps != 0) { 908 device_printf(sc->sc_dev, 909 "only 1 tdma vap supported\n"); 910 goto bad; 911 } 912 needbeacon = 1; 913 flags |= IEEE80211_CLONE_NOBEACONS; 914 } 915 /* fall thru... */ 916#endif 917 case IEEE80211_M_MONITOR: 918 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { 919 /* 920 * Adopt existing mode. Adding a monitor or ahdemo 921 * vap to an existing configuration is of dubious 922 * value but should be ok. 923 */ 924 /* XXX not right for monitor mode */ 925 ic_opmode = ic->ic_opmode; 926 } 927 break; 928 case IEEE80211_M_HOSTAP: 929 case IEEE80211_M_MBSS: 930 needbeacon = 1; 931 break; 932 case IEEE80211_M_WDS: 933 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { 934 device_printf(sc->sc_dev, 935 "wds not supported in sta mode\n"); 936 goto bad; 937 } 938 /* 939 * Silently remove any request for a unique 940 * bssid; WDS vap's always share the local 941 * mac address. 942 */ 943 flags &= ~IEEE80211_CLONE_BSSID; 944 if (sc->sc_nvaps == 0) 945 ic_opmode = IEEE80211_M_HOSTAP; 946 else 947 ic_opmode = ic->ic_opmode; 948 break; 949 default: 950 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); 951 goto bad; 952 } 953 /* 954 * Check that a beacon buffer is available; the code below assumes it. 955 */ 956 if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { 957 device_printf(sc->sc_dev, "no beacon buffer available\n"); 958 goto bad; 959 } 960 961 /* STA, AHDEMO? 
*/ 962 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { 963 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); 964 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 965 } 966 967 vap = &avp->av_vap; 968 /* XXX can't hold mutex across if_alloc */ 969 ATH_UNLOCK(sc); 970 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, 971 bssid, mac); 972 ATH_LOCK(sc); 973 if (error != 0) { 974 device_printf(sc->sc_dev, "%s: error %d creating vap\n", 975 __func__, error); 976 goto bad2; 977 } 978 979 /* h/w crypto support */ 980 vap->iv_key_alloc = ath_key_alloc; 981 vap->iv_key_delete = ath_key_delete; 982 vap->iv_key_set = ath_key_set; 983 vap->iv_key_update_begin = ath_key_update_begin; 984 vap->iv_key_update_end = ath_key_update_end; 985 986 /* override various methods */ 987 avp->av_recv_mgmt = vap->iv_recv_mgmt; 988 vap->iv_recv_mgmt = ath_recv_mgmt; 989 vap->iv_reset = ath_reset_vap; 990 vap->iv_update_beacon = ath_beacon_update; 991 avp->av_newstate = vap->iv_newstate; 992 vap->iv_newstate = ath_newstate; 993 avp->av_bmiss = vap->iv_bmiss; 994 vap->iv_bmiss = ath_bmiss_vap; 995 996 /* Set default parameters */ 997 998 /* 999 * Anything earlier than some AR9300 series MACs don't 1000 * support a smaller MPDU density. 1001 */ 1002 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; 1003 /* 1004 * All NICs can handle the maximum size, however 1005 * AR5416 based MACs can only TX aggregates w/ RTS 1006 * protection when the total aggregate size is <= 8k. 1007 * However, for now that's enforced by the TX path. 1008 */ 1009 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1010 1011 avp->av_bslot = -1; 1012 if (needbeacon) { 1013 /* 1014 * Allocate beacon state and setup the q for buffered 1015 * multicast frames. We know a beacon buffer is 1016 * available because we checked above. 
1017 */ 1018 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); 1019 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); 1020 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { 1021 /* 1022 * Assign the vap to a beacon xmit slot. As above 1023 * this cannot fail to find a free one. 1024 */ 1025 avp->av_bslot = assign_bslot(sc); 1026 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, 1027 ("beacon slot %u not empty", avp->av_bslot)); 1028 sc->sc_bslot[avp->av_bslot] = vap; 1029 sc->sc_nbcnvaps++; 1030 } 1031 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { 1032 /* 1033 * Multple vaps are to transmit beacons and we 1034 * have h/w support for TSF adjusting; enable 1035 * use of staggered beacons. 1036 */ 1037 sc->sc_stagbeacons = 1; 1038 } 1039 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); 1040 } 1041 1042 ic->ic_opmode = ic_opmode; 1043 if (opmode != IEEE80211_M_WDS) { 1044 sc->sc_nvaps++; 1045 if (opmode == IEEE80211_M_STA) 1046 sc->sc_nstavaps++; 1047 if (opmode == IEEE80211_M_MBSS) 1048 sc->sc_nmeshvaps++; 1049 } 1050 switch (ic_opmode) { 1051 case IEEE80211_M_IBSS: 1052 sc->sc_opmode = HAL_M_IBSS; 1053 break; 1054 case IEEE80211_M_STA: 1055 sc->sc_opmode = HAL_M_STA; 1056 break; 1057 case IEEE80211_M_AHDEMO: 1058#ifdef IEEE80211_SUPPORT_TDMA 1059 if (vap->iv_caps & IEEE80211_C_TDMA) { 1060 sc->sc_tdma = 1; 1061 /* NB: disable tsf adjust */ 1062 sc->sc_stagbeacons = 0; 1063 } 1064 /* 1065 * NB: adhoc demo mode is a pseudo mode; to the hal it's 1066 * just ap mode. 1067 */ 1068 /* fall thru... */ 1069#endif 1070 case IEEE80211_M_HOSTAP: 1071 case IEEE80211_M_MBSS: 1072 sc->sc_opmode = HAL_M_HOSTAP; 1073 break; 1074 case IEEE80211_M_MONITOR: 1075 sc->sc_opmode = HAL_M_MONITOR; 1076 break; 1077 default: 1078 /* XXX should not happen */ 1079 break; 1080 } 1081 if (sc->sc_hastsfadd) { 1082 /* 1083 * Configure whether or not TSF adjust should be done. 
1084 */ 1085 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); 1086 } 1087 if (flags & IEEE80211_CLONE_NOBEACONS) { 1088 /* 1089 * Enable s/w beacon miss handling. 1090 */ 1091 sc->sc_swbmiss = 1; 1092 } 1093 ATH_UNLOCK(sc); 1094 1095 /* complete setup */ 1096 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); 1097 return vap; 1098bad2: 1099 reclaim_address(sc, mac); 1100 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); 1101bad: 1102 free(avp, M_80211_VAP); 1103 ATH_UNLOCK(sc); 1104 return NULL; 1105} 1106 1107static void 1108ath_vap_delete(struct ieee80211vap *vap) 1109{ 1110 struct ieee80211com *ic = vap->iv_ic; 1111 struct ifnet *ifp = ic->ic_ifp; 1112 struct ath_softc *sc = ifp->if_softc; 1113 struct ath_hal *ah = sc->sc_ah; 1114 struct ath_vap *avp = ATH_VAP(vap); 1115 1116 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1117 /* 1118 * Quiesce the hardware while we remove the vap. In 1119 * particular we need to reclaim all references to 1120 * the vap state by any frames pending on the tx queues. 1121 */ 1122 ath_hal_intrset(ah, 0); /* disable interrupts */ 1123 ath_draintxq(sc); /* stop xmit side */ 1124 ath_stoprecv(sc); /* stop recv side */ 1125 } 1126 1127 ieee80211_vap_detach(vap); 1128 ATH_LOCK(sc); 1129 /* 1130 * Reclaim beacon state. Note this must be done before 1131 * the vap instance is reclaimed as we may have a reference 1132 * to it in the buffer for the beacon frame. 1133 */ 1134 if (avp->av_bcbuf != NULL) { 1135 if (avp->av_bslot != -1) { 1136 sc->sc_bslot[avp->av_bslot] = NULL; 1137 sc->sc_nbcnvaps--; 1138 } 1139 ath_beacon_return(sc, avp->av_bcbuf); 1140 avp->av_bcbuf = NULL; 1141 if (sc->sc_nbcnvaps == 0) { 1142 sc->sc_stagbeacons = 0; 1143 if (sc->sc_hastsfadd) 1144 ath_hal_settsfadjust(sc->sc_ah, 0); 1145 } 1146 /* 1147 * Reclaim any pending mcast frames for the vap. 1148 */ 1149 ath_tx_draintxq(sc, &avp->av_mcastq); 1150 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1151 } 1152 /* 1153 * Update bookkeeping. 
1154 */ 1155 if (vap->iv_opmode == IEEE80211_M_STA) { 1156 sc->sc_nstavaps--; 1157 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1158 sc->sc_swbmiss = 0; 1159 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1160 vap->iv_opmode == IEEE80211_M_MBSS) { 1161 reclaim_address(sc, vap->iv_myaddr); 1162 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1163 if (vap->iv_opmode == IEEE80211_M_MBSS) 1164 sc->sc_nmeshvaps--; 1165 } 1166 if (vap->iv_opmode != IEEE80211_M_WDS) 1167 sc->sc_nvaps--; 1168#ifdef IEEE80211_SUPPORT_TDMA 1169 /* TDMA operation ceases when the last vap is destroyed */ 1170 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1171 sc->sc_tdma = 0; 1172 sc->sc_swbmiss = 0; 1173 } 1174#endif 1175 ATH_UNLOCK(sc); 1176 free(avp, M_80211_VAP); 1177 1178 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1179 /* 1180 * Restart rx+tx machines if still running (RUNNING will 1181 * be reset if we just destroyed the last vap). 1182 */ 1183 if (ath_startrecv(sc) != 0) 1184 if_printf(ifp, "%s: unable to restart recv logic\n", 1185 __func__); 1186 if (sc->sc_beacons) { /* restart beacons */ 1187#ifdef IEEE80211_SUPPORT_TDMA 1188 if (sc->sc_tdma) 1189 ath_tdma_config(sc, NULL); 1190 else 1191#endif 1192 ath_beacon_config(sc, NULL); 1193 } 1194 ath_hal_intrset(ah, sc->sc_imask); 1195 } 1196} 1197 1198void 1199ath_suspend(struct ath_softc *sc) 1200{ 1201 struct ifnet *ifp = sc->sc_ifp; 1202 struct ieee80211com *ic = ifp->if_l2com; 1203 1204 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1205 __func__, ifp->if_flags); 1206 1207 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1208 if (ic->ic_opmode == IEEE80211_M_STA) 1209 ath_stop(ifp); 1210 else 1211 ieee80211_suspend_all(ic); 1212 /* 1213 * NB: don't worry about putting the chip in low power 1214 * mode; pci will power off our socket on suspend and 1215 * CardBus detaches the device. 1216 */ 1217} 1218 1219/* 1220 * Reset the key cache since some parts do not reset the 1221 * contents on resume. 
 * First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	/* Wipe every h/w key cache entry, then have net80211 re-plumb keys. */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Power-management resume hook: the chip was powered down on
 * suspend, so reset it, reload the key cache and restart the
 * interface if it was up when ath_suspend() ran.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}
	if (sc->sc_softled) {
		/* Restore s/w LED state: pin as output, LED off. */
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	/* XXX beacons ? */
}

/*
 * System shutdown hook: just stop the interface.
 */
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0)
		return;

	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				/* Count down slots until it's our turn to tx. */
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			ATH_LOCK(sc);
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process. Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			ATH_UNLOCK(sc);
			/*
			 * Enqueue an RX proc, to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them. This is the only place we should be
			 * doing this.
			 */
			ATH_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			ATH_UNLOCK(sc);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			ATH_LOCK(sc);
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			sc->sc_stats.ast_rxorn++;
		}
	}
}

/*
 * Deferred task: handle a fatal hardware error by dumping any
 * diagnostic state the hal can provide and resetting the chip.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1] , state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp);
}

/*
 * Per-vap beacon miss handler (iv_bmiss override).
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* threshold in TSF units (1024 usec per beacon TU) */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	/* chain to the saved net80211 handler */
	ATH_VAP(vap)->av_bmiss(vap);
}

/*
 * Query the hal's hang-check diagnostic; on success store the
 * hang bits in *hangs and return 1, otherwise return 0.
 */
static int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

/*
 * Deferred beacon-miss task: reset if the baseband appears hung,
 * otherwise pass the beacon miss up to net80211.
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
		ath_reset(ifp);
	} else
		ieee80211_beacon_miss(ifp->if_l2com);
}

/*
 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	/* Only relevant when TKIP is offered and h/w can't do MIC+WME. */
	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			/* WME active: h/w MIC off, advertise s/w MIC */
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			/* no WME: h/w can do the MIC */
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}

/*
 * Bring the interface up: reset the chip, (re)initialize driver
 * state, start the receive engine and enable interrupts.
 * NOTE(review): registered as the if_init handler — hence the
 * void * argument; confirm against the attach code.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* restart the calibration/ANI bookkeeping from scratch */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

/*
 * Stop the interface; caller must hold the softc lock.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}

/*
 * Locking wrapper around ath_stop_locked().
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	return 0;
}

/*
 * Per-vap reset hook (iv_reset): handle parameter changes that
 * don't need a full chip reset directly, fall back to ath_reset()
 * for everything else.
 */
static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	return ath_reset(ifp);
}

/*
 * Pop the first non-busy buffer off the tx free list; returns NULL
 * if the list is empty or its head is still busy.  Caller must
 * hold the txbuf lock.
 */
struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK_ASSERT(sc);

	bf = TAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
		TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
	else
		bf = NULL;
	if (bf == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
		    TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
			"out of xmit buffers" : "xmit buffer busy");
	}
	return bf;
}

/*
 * Locking wrapper for _ath_getbuf_locked(); marks the interface
 * OACTIVE when no buffer is available so the stack stops feeding
 * us frames.
 */
struct ath_buf *
ath_getbuf(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	bf = _ath_getbuf_locked(sc);
	if (bf == NULL) {
		struct ifnet *ifp = sc->sc_ifp;

		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
		sc->sc_stats.ast_tx_qstop++;
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}
	ATH_TXBUF_UNLOCK(sc);
	return bf;
}

/*
 * Transmit start: drain the interface send queue, mapping each
 * frame (and any fragments) onto tx buffers and handing them to
 * the hardware.  Frames that fail are reclaimed and their node
 * references released.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* nothing queued; return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		/* NB: net80211 stashes the node reference in rcvif */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		TAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ifp->if_oerrors++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = TAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			TAILQ_REMOVE(&frags, bf, bf_list);
			goto nextfrag;
		}

		sc->sc_wd_timer = 5;
	}
}

/*
 * Media change callback; the only thing that can change is the
 * fixed rate, which doesn't require a reset.
 */
static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_block(sc->sc_tq);
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o accept PHY error frames when hardware doesn't have MIB support
 *   to count and we need them for ANI (sta mode only until recently)
 *   and we are not scanning (ANI is disabled)
 *   NB: older hal's add rx filter bits out of sight
and we need to
 *   blindly preserve them
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, mesh, or monitor modes
 * o enable promiscuous mode
 *   - when in monitor mode
 *   - if interface marked PROMISC (assumes bridge setting is filtered)
 * o accept beacons:
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when scanning
 *   - when doing s/w beacon miss (e.g. for ap+sta)
 *   - when operating in ap mode in 11g to detect overlapping bss that
 *     require protection
 *   - when operating in mesh mode to detect neighbors
 * o accept control frames:
 *   - when in monitor mode
 * XXX HT protection for 11n
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

	/* Always accept unicast, broadcast and multicast traffic. */
	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	/*
	 * PHY error frames are only needed for ANI when the chip has
	 * no MIB counters; they're useless (and ANI is off) while
	 * scanning.
	 */
	if (!sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
	/* Probe requests matter in every mode except pure station. */
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	/* XXX ic->ic_monvaps != 0? */
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;

	/*
	 * Enable hardware PS-POLL RX only for hostap mode;
	 * STA mode sends PS-POLL frames but never
	 * receives them.
	 */
	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
	    0, NULL) == HAL_OK &&
	    ic->ic_opmode == IEEE80211_M_HOSTAP)
		rfilt |= HAL_RX_FILTER_PSPOLL;

	if (sc->sc_nmeshvaps) {
		/* Mesh needs beacons for neighbor discovery; use BSSID
		 * matching when the hardware supports it, else fall back
		 * to promiscuous reception. */
		rfilt |= HAL_RX_FILTER_BEACON;
		if (sc->sc_hasbmatch)
			rfilt |= HAL_RX_FILTER_BSSID;
		else
			rfilt |= HAL_RX_FILTER_PROM;
	}
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;

	/*
	 * Enable RX of compressed BAR frames only when doing
	 * 802.11n. Required for A-MPDU.
	 */
	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_COMPBAR;

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}

/*
 * Recompute and install the RX filter.
 * NOTE(review): presumably installed as the net80211 ic_update_promisc
 * callback so it runs when IFF_PROMISC changes -- confirm against attach.
 */
static void
ath_update_promisc(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

/*
 * Recompute and install the hardware multicast filter from the
 * interface's multicast address list (or open the filter completely
 * when IFF_ALLMULTI is set).
 */
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];	/* 64-bit hardware hash filter, as two words */

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;	/* 6-bit index into the 64-bit filter */
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		if_maddr_runlock(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;	/* accept all multicast */
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
		__func__, mfilt[0], mfilt[1]);
}

/*
 * (Re)program operating mode state into the hardware: rx filter,
 * opmode, MAC address and multicast filter.
 */
static void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	/* handle any link-level address change */
	ath_hal_setmac(ah, IF_LLADDR(ifp));

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}

/*
 * Set the slot time based on the current setting.
 */
static void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	/* Half/quarter-rate channels use scaled slot times. */
	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	ath_hal_setslottime(ah, usec);
	/* Mark the pending slot-time update as committed to hardware. */
	sc->sc_updateslot = OK;
}

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		sc->sc_updateslot = UPDATE;	/* picked up by ath_beacon_proc */
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 * Returns the h/w queue number, or -1 on failure (per the HAL
 * setuptxqueue contract -- TODO confirm against HAL docs).
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Setup the transmit queue parameters for the beacon queue.
 * Returns 1 on success, 0 if the HAL rejected the parameters.
 */
static int
ath_beaconq_config(struct ath_softc *sc)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
		 */
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	}

	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
		device_printf(sc->sc_dev, "unable to update parameters for "
			"beacon hardware queue!\n");
		return 0;
	} else {
		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
		return 1;
	}
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Allocate and setup an initial beacon frame.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	bf = avp->av_bcbuf;
	/* Release any previous beacon frame/node before re-populating. */
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timstamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* NB: &wh[1] is the tstamp field immediately after the
		 * 802.11 header (see the assumption documented above). */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);	/* beacon buf holds a node ref */

	return 0;
}

/*
 * Setup the beacon frame for transmit.
 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	flags = HAL_TXDESC_NOACK;	/* beacons are never ACKed */
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}

/*
 * net80211 callback: note that a dynamic beacon element (TIM, etc.)
 * needs regenerating at the next beacon transmit.
 */
static void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;

	setbit(bo->bo_flags, item);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
static void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	src->axq_depth = 0;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		sc->sc_stats.ast_be_missed++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {		/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		/* Map the current TSF (in TU) to the beacon slot due now. */
		tsftu = ath_hal_gettsf32(ah) >> 10;
		/* XXX lintval */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {				/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/* Chain one beacon per active vap via the descriptor links. */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;			/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 anntenae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}

/*
 * Regenerate the beacon frame for a vap and push any pending
 * multicast (CAB) traffic behind it.  Returns the beacon buffer
 * ready for DMA, or NULL if the frame could not be remapped.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state >= IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/* NB: bo_tim[4] & 1 is the multicast-pending bit of the TIM
	 * bitmap control field -- TODO confirm against net80211. */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);		/* NB: cabq before mcastq - lock order */
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		if (! TAILQ_EMPTY(&(cabq->axq_q)))
			ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
		ATH_TXQ_UNLOCK(cabq);
	}
	return bf;
}

/*
 * Regenerate and transmit the beacon for an adhoc vap.  Used for the
 * self-linked (VEOL) descriptor case where the frame is loaded once.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
		sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	ath_reset(ifp);
}

/*
 * Reclaim beacon resources and return buffer to the pool.
 */
static void
ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
{

	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		/* Drop the node reference taken in ath_beacon_alloc(). */
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}
	TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
}

/*
 * Reclaim beacon resources.
 * NB: unlike ath_beacon_return() the buffers stay on sc_bbuf.
 */
static void
ath_beacon_free(struct ath_softc *sc)
{
	struct ath_buf *bf;

	TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
		if (bf->bf_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_node != NULL) {
			ieee80211_free_node(bf->bf_node);
			bf->bf_node = NULL;
		}
	}
}

/*
 * Configure the beacon and sleep timers.
 *
 * When operating as an AP this resets the TSF and sets
 * up the hardware to notify us when we need to issue beacons.
 *
 * When operating in station mode this sets up the beacon
 * timers according to the timestamp of the last received
 * beacon and the current TSF, configures PCF and DTIM
 * handling, programs the sleep registers so the hardware
 * will wakeup in time to receive beacons, and configures
 * the beacon miss handling so we'll receive a BMISS
 * interrupt when we stop seeing beacons from the AP
 * we've associated with.
 */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = vap->iv_bss;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * For multi-bss ap/mesh support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* Disable interrupts while reprogramming the timers. */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_MBSS) {
			/*
			 * In AP/mesh mode we enable the beacon timers
			 * and SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	sc->sc_syncbeacon = 0;
#undef FUDGE
#undef TSF_TO_TU
}

/*
 * bus_dmamap_load callback: record the physical address of the
 * single DMA segment for the caller.
 */
static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}

/*
 * Allocate a DMA-coherent descriptor area plus the ath_buf array
 * referencing it, and place the buffers on 'head'.  Returns 0 or a
 * bus_dma error code; on failure all intermediate state is released.
 */
static int
ath_descdma_setup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	struct ath_buf *bf;
	int i, bsize, error;
	int desc_len;

	desc_len = sizeof(struct ath_desc);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
	    __func__, name, nbuf, ndesc);

	dd->dd_name = name;
	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Merlin work-around:
	 * Descriptors that cross the 4KB boundary can't be used.
	 * Assume one skipped descriptor per 4KB page.
	 */
	if (! ath_hal_split4ktrans(sc->sc_ah)) {
		int numdescpage = 4096 / (desc_len * ndesc);
		dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       BUS_DMA_ALLOCNOW,	/* flags */
		       NULL,			/* lockfunc */
		       NULL,			/* lockarg */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to create dmamap for %s descriptors, "
			"error %u\n", dd->dd_name, error);
		goto fail0;
	}

	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", nbuf * ndesc, dd->dd_name, error);
		goto fail1;
	}

	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				ath_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	ds = (uint8_t *) dd->dd_desc;
	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
			dd->dd_name, bsize);
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) {
		bf->bf_desc = (struct ath_desc *) ds;
		bf->bf_daddr = DS2PHYS(dd, ds);
		if (! ath_hal_split4ktrans(sc->sc_ah)) {
			/*
			 * Merlin WAR: Skip descriptor addresses which
			 * cause 4KB boundary crossing along any point
			 * in the descriptor.
			 */
			if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
			    desc_len * ndesc)) {
				/* Start at the next page */
				ds += 0x1000 - (bf->bf_daddr & 0xFFF);
				bf->bf_desc = (struct ath_desc *) ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
				"buffer %u, error %u\n", dd->dd_name, i, error);
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}
	return 0;
fail3:
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
fail0:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef DS2PHYS
#undef ATH_DESC_4KB_BOUND_CHECK
}

/*
 * Release a descriptor DMA area and its associated ath_buf list,
 * freeing any mbufs/node references still attached to the buffers.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	TAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
3147 * Reclaim node reference. 3148 */ 3149 ieee80211_free_node(ni); 3150 } 3151 } 3152 3153 TAILQ_INIT(head); 3154 free(dd->dd_bufptr, M_ATHDEV); 3155 memset(dd, 0, sizeof(*dd)); 3156} 3157 3158static int 3159ath_desc_alloc(struct ath_softc *sc) 3160{ 3161 int error; 3162 3163 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 3164 "rx", ath_rxbuf, 1); 3165 if (error != 0) 3166 return error; 3167 3168 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3169 "tx", ath_txbuf, ATH_TXDESC); 3170 if (error != 0) { 3171 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3172 return error; 3173 } 3174 3175 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3176 "beacon", ATH_BCBUF, 1); 3177 if (error != 0) { 3178 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3179 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3180 return error; 3181 } 3182 return 0; 3183} 3184 3185static void 3186ath_desc_free(struct ath_softc *sc) 3187{ 3188 3189 if (sc->sc_bdma.dd_desc_len != 0) 3190 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3191 if (sc->sc_txdma.dd_desc_len != 0) 3192 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3193 if (sc->sc_rxdma.dd_desc_len != 0) 3194 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 3195} 3196 3197static struct ieee80211_node * 3198ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3199{ 3200 struct ieee80211com *ic = vap->iv_ic; 3201 struct ath_softc *sc = ic->ic_ifp->if_softc; 3202 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3203 struct ath_node *an; 3204 3205 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3206 if (an == NULL) { 3207 /* XXX stat+msg */ 3208 return NULL; 3209 } 3210 ath_rate_node_init(sc, an); 3211 3212 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3213 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3214 device_get_nameunit(sc->sc_dev), an); 3215 mtx_init(&an->an_mtx, an->an_name, 
NULL, MTX_DEF); 3216 3217 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 3218 return &an->an_node; 3219} 3220 3221static void 3222ath_node_free(struct ieee80211_node *ni) 3223{ 3224 struct ieee80211com *ic = ni->ni_ic; 3225 struct ath_softc *sc = ic->ic_ifp->if_softc; 3226 3227 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 3228 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3229 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3230 sc->sc_node_free(ni); 3231} 3232 3233static void 3234ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3235{ 3236 struct ieee80211com *ic = ni->ni_ic; 3237 struct ath_softc *sc = ic->ic_ifp->if_softc; 3238 struct ath_hal *ah = sc->sc_ah; 3239 3240 *rssi = ic->ic_node_getrssi(ni); 3241 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3242 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3243 else 3244 *noise = -95; /* nominally correct */ 3245} 3246 3247static int 3248ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) 3249{ 3250 struct ath_hal *ah = sc->sc_ah; 3251 int error; 3252 struct mbuf *m; 3253 struct ath_desc *ds; 3254 3255 m = bf->bf_m; 3256 if (m == NULL) { 3257 /* 3258 * NB: by assigning a page to the rx dma buffer we 3259 * implicitly satisfy the Atheros requirement that 3260 * this buffer be cache-line-aligned and sized to be 3261 * multiple of the cache line size. Not doing this 3262 * causes weird stuff to happen (for the 5210 at least). 
3263 */ 3264 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3265 if (m == NULL) { 3266 DPRINTF(sc, ATH_DEBUG_ANY, 3267 "%s: no mbuf/cluster\n", __func__); 3268 sc->sc_stats.ast_rx_nombuf++; 3269 return ENOMEM; 3270 } 3271 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 3272 3273 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, 3274 bf->bf_dmamap, m, 3275 bf->bf_segs, &bf->bf_nseg, 3276 BUS_DMA_NOWAIT); 3277 if (error != 0) { 3278 DPRINTF(sc, ATH_DEBUG_ANY, 3279 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", 3280 __func__, error); 3281 sc->sc_stats.ast_rx_busdma++; 3282 m_freem(m); 3283 return error; 3284 } 3285 KASSERT(bf->bf_nseg == 1, 3286 ("multi-segment packet; nseg %u", bf->bf_nseg)); 3287 bf->bf_m = m; 3288 } 3289 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); 3290 3291 /* 3292 * Setup descriptors. For receive we always terminate 3293 * the descriptor list with a self-linked entry so we'll 3294 * not get overrun under high load (as can happen with a 3295 * 5212 when ANI processing enables PHY error frames). 3296 * 3297 * To insure the last descriptor is self-linked we create 3298 * each descriptor as self-linked and add it to the end. As 3299 * each additional descriptor is added the previous self-linked 3300 * entry is ``fixed'' naturally. This should be safe even 3301 * if DMA is happening. When processing RX interrupts we 3302 * never remove/process the last, self-linked, entry on the 3303 * descriptor list. This insures the hardware always has 3304 * someplace to write a new frame. 3305 */ 3306 /* 3307 * 11N: we can no longer afford to self link the last descriptor. 3308 * MAC acknowledges BA status as long as it copies frames to host 3309 * buffer (or rx fifo). This can incorrectly acknowledge packets 3310 * to a sender if last desc is self-linked. 
/*
 * Extend a 32-bit rx timestamp to a full 64-bit TSF using the given
 * 64-bit TSF reference.
 *
 * Splice the 32-bit stamp into the low word of the reference TSF, then
 * correct for the case where the hardware counter wrapped between the
 * frame's timestamp and the TSF read: if the two low words disagree by
 * more than 2^28 ticks the stamp belongs to the adjacent 2^32 epoch,
 * so step the high word down (stamp from before the wrap) or up (TSF
 * read just before the wrap).
 */
static __inline u_int64_t
ath_extend_tsf32(u_int32_t rstamp, u_int64_t tsf)
{
	u_int32_t low = (u_int32_t)(tsf & 0xffffffff);
	u_int64_t full = (tsf & ~0xffffffffULL) | rstamp;

	if (rstamp > low && rstamp - low > 0x10000000)
		full -= 0x100000000ULL;
	else if (rstamp < low && low - rstamp > 0x10000000)
		full += 0x100000000ULL;

	return (full);
}
3379 */ 3380static void 3381ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, 3382 int subtype, int rssi, int nf) 3383{ 3384 struct ieee80211vap *vap = ni->ni_vap; 3385 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; 3386 3387 /* 3388 * Call up first so subsequent work can use information 3389 * potentially stored in the node (e.g. for ibss merge). 3390 */ 3391 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf); 3392 switch (subtype) { 3393 case IEEE80211_FC0_SUBTYPE_BEACON: 3394 /* update rssi statistics for use by the hal */ 3395 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); 3396 if (sc->sc_syncbeacon && 3397 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { 3398 /* 3399 * Resync beacon timers using the tsf of the beacon 3400 * frame we just received. 3401 */ 3402 ath_beacon_config(sc, vap); 3403 } 3404 /* fall thru... */ 3405 case IEEE80211_FC0_SUBTYPE_PROBE_RESP: 3406 if (vap->iv_opmode == IEEE80211_M_IBSS && 3407 vap->iv_state == IEEE80211_S_RUN) { 3408 uint32_t rstamp = sc->sc_lastrs->rs_tstamp; 3409 uint64_t tsf = ath_extend_tsf(sc, rstamp, 3410 ath_hal_gettsf64(sc->sc_ah)); 3411 /* 3412 * Handle ibss merge as needed; check the tsf on the 3413 * frame before attempting the merge. The 802.11 spec 3414 * says the station should change it's bssid to match 3415 * the oldest station with the same ssid, where oldest 3416 * is determined by the tsf. Note that hardware 3417 * reconfiguration happens through callback to 3418 * ath_newstate as the state machine will go from 3419 * RUN -> RUN when this happens. 3420 */ 3421 if (le64toh(ni->ni_tstamp.tsf) >= tsf) { 3422 DPRINTF(sc, ATH_DEBUG_STATE, 3423 "ibss merge, rstamp %u tsf %ju " 3424 "tstamp %ju\n", rstamp, (uintmax_t)tsf, 3425 (uintmax_t)ni->ni_tstamp.tsf); 3426 (void) ieee80211_ibss_merge(ni); 3427 } 3428 } 3429 break; 3430 } 3431} 3432 3433/* 3434 * Set the default antenna. 
3435 */ 3436static void 3437ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3438{ 3439 struct ath_hal *ah = sc->sc_ah; 3440 3441 /* XXX block beacon interrupts */ 3442 ath_hal_setdefantenna(ah, antenna); 3443 if (sc->sc_defant != antenna) 3444 sc->sc_stats.ast_ant_defswitch++; 3445 sc->sc_defant = antenna; 3446 sc->sc_rxotherant = 0; 3447} 3448 3449static void 3450ath_rx_tap(struct ifnet *ifp, struct mbuf *m, 3451 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) 3452{ 3453#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) 3454#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) 3455#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) 3456#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) 3457 struct ath_softc *sc = ifp->if_softc; 3458 const HAL_RATE_TABLE *rt; 3459 uint8_t rix; 3460 3461 rt = sc->sc_currates; 3462 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); 3463 rix = rt->rateCodeToIndex[rs->rs_rate]; 3464 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; 3465 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; 3466#ifdef AH_SUPPORT_AR5416 3467 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; 3468 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ 3469 struct ieee80211com *ic = ifp->if_l2com; 3470 3471 if ((rs->rs_flags & HAL_RX_2040) == 0) 3472 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; 3473 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) 3474 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; 3475 else 3476 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; 3477 if ((rs->rs_flags & HAL_RX_GI) == 0) 3478 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; 3479 } 3480#endif 3481 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); 3482 if (rs->rs_status & HAL_RXERR_CRC) 3483 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; 3484 /* XXX propagate other error flags from descriptor */ 3485 sc->sc_rx_th.wr_antnoise = nf; 3486 sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; 3487 sc->sc_rx_th.wr_antenna = rs->rs_antenna; 3488#undef 
CHAN_HT 3489#undef CHAN_HT20 3490#undef CHAN_HT40U 3491#undef CHAN_HT40D 3492} 3493 3494static void 3495ath_handle_micerror(struct ieee80211com *ic, 3496 struct ieee80211_frame *wh, int keyix) 3497{ 3498 struct ieee80211_node *ni; 3499 3500 /* XXX recheck MIC to deal w/ chips that lie */ 3501 /* XXX discard MIC errors on !data frames */ 3502 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); 3503 if (ni != NULL) { 3504 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); 3505 ieee80211_free_node(ni); 3506 } 3507} 3508 3509/* 3510 * Only run the RX proc if it's not already running. 3511 * Since this may get run as part of the reset/flush path, 3512 * the task can't clash with an existing, running tasklet. 3513 */ 3514static void 3515ath_rx_tasklet(void *arg, int npending) 3516{ 3517 struct ath_softc *sc = arg; 3518 3519 CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending); 3520 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); 3521 ath_rx_proc(sc, 1); 3522} 3523 3524static void 3525ath_rx_proc(struct ath_softc *sc, int resched) 3526{ 3527#define PA2DESC(_sc, _pa) \ 3528 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 3529 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 3530 struct ath_buf *bf; 3531 struct ifnet *ifp = sc->sc_ifp; 3532 struct ieee80211com *ic = ifp->if_l2com; 3533 struct ath_hal *ah = sc->sc_ah; 3534 struct ath_desc *ds; 3535 struct ath_rx_status *rs; 3536 struct mbuf *m; 3537 struct ieee80211_node *ni; 3538 int len, type, ngood; 3539 HAL_STATUS status; 3540 int16_t nf; 3541 u_int64_t tsf; 3542 int npkts = 0; 3543 3544 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); 3545 ngood = 0; 3546 nf = ath_hal_getchannoise(ah, sc->sc_curchan); 3547 sc->sc_stats.ast_rx_noise = nf; 3548 tsf = ath_hal_gettsf64(ah); 3549 do { 3550 bf = TAILQ_FIRST(&sc->sc_rxbuf); 3551 if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ 3552 if_printf(ifp, "%s: no buffer!\n", __func__); 3553 break; 3554 } 
else if (bf == NULL) { 3555 /* 3556 * End of List: 3557 * this can happen for non-self-linked RX chains 3558 */ 3559 sc->sc_stats.ast_rx_hitqueueend++; 3560 break; 3561 } 3562 m = bf->bf_m; 3563 if (m == NULL) { /* NB: shouldn't happen */ 3564 /* 3565 * If mbuf allocation failed previously there 3566 * will be no mbuf; try again to re-populate it. 3567 */ 3568 /* XXX make debug msg */ 3569 if_printf(ifp, "%s: no mbuf!\n", __func__); 3570 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 3571 goto rx_next; 3572 } 3573 ds = bf->bf_desc; 3574 if (ds->ds_link == bf->bf_daddr) { 3575 /* NB: never process the self-linked entry at the end */ 3576 sc->sc_stats.ast_rx_hitqueueend++; 3577 break; 3578 } 3579 /* XXX sync descriptor memory */ 3580 /* 3581 * Must provide the virtual address of the current 3582 * descriptor, the physical address, and the virtual 3583 * address of the next descriptor in the h/w chain. 3584 * This allows the HAL to look ahead to see if the 3585 * hardware is done with a descriptor by checking the 3586 * done bit in the following descriptor and the address 3587 * of the current descriptor the DMA engine is working 3588 * on. All this is necessary because of our use of 3589 * a self-linked list to avoid rx overruns. 
3590 */ 3591 rs = &bf->bf_status.ds_rxstat; 3592 status = ath_hal_rxprocdesc(ah, ds, 3593 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); 3594#ifdef ATH_DEBUG 3595 if (sc->sc_debug & ATH_DEBUG_RECV_DESC) 3596 ath_printrxbuf(sc, bf, 0, status == HAL_OK); 3597#endif 3598 if (status == HAL_EINPROGRESS) 3599 break; 3600 3601 TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); 3602 npkts++; 3603 3604 /* These aren't specifically errors */ 3605 if (rs->rs_flags & HAL_RX_GI) 3606 sc->sc_stats.ast_rx_halfgi++; 3607 if (rs->rs_flags & HAL_RX_2040) 3608 sc->sc_stats.ast_rx_2040++; 3609 if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) 3610 sc->sc_stats.ast_rx_pre_crc_err++; 3611 if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) 3612 sc->sc_stats.ast_rx_post_crc_err++; 3613 if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) 3614 sc->sc_stats.ast_rx_decrypt_busy_err++; 3615 if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) 3616 sc->sc_stats.ast_rx_hi_rx_chain++; 3617 3618 if (rs->rs_status != 0) { 3619 if (rs->rs_status & HAL_RXERR_CRC) 3620 sc->sc_stats.ast_rx_crcerr++; 3621 if (rs->rs_status & HAL_RXERR_FIFO) 3622 sc->sc_stats.ast_rx_fifoerr++; 3623 if (rs->rs_status & HAL_RXERR_PHY) { 3624 sc->sc_stats.ast_rx_phyerr++; 3625 /* Process DFS radar events */ 3626 if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || 3627 (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { 3628 /* Since we're touching the frame data, sync it */ 3629 bus_dmamap_sync(sc->sc_dmat, 3630 bf->bf_dmamap, 3631 BUS_DMASYNC_POSTREAD); 3632 /* Now pass it to the radar processing code */ 3633 ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs); 3634 } 3635 3636 /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ 3637 if (rs->rs_phyerr < 64) 3638 sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; 3639 goto rx_error; /* NB: don't count in ierrors */ 3640 } 3641 if (rs->rs_status & HAL_RXERR_DECRYPT) { 3642 /* 3643 * Decrypt error. 
If the error occurred 3644 * because there was no hardware key, then 3645 * let the frame through so the upper layers 3646 * can process it. This is necessary for 5210 3647 * parts which have no way to setup a ``clear'' 3648 * key cache entry. 3649 * 3650 * XXX do key cache faulting 3651 */ 3652 if (rs->rs_keyix == HAL_RXKEYIX_INVALID) 3653 goto rx_accept; 3654 sc->sc_stats.ast_rx_badcrypt++; 3655 } 3656 if (rs->rs_status & HAL_RXERR_MIC) { 3657 sc->sc_stats.ast_rx_badmic++; 3658 /* 3659 * Do minimal work required to hand off 3660 * the 802.11 header for notification. 3661 */ 3662 /* XXX frag's and qos frames */ 3663 len = rs->rs_datalen; 3664 if (len >= sizeof (struct ieee80211_frame)) { 3665 bus_dmamap_sync(sc->sc_dmat, 3666 bf->bf_dmamap, 3667 BUS_DMASYNC_POSTREAD); 3668 ath_handle_micerror(ic, 3669 mtod(m, struct ieee80211_frame *), 3670 sc->sc_splitmic ? 3671 rs->rs_keyix-32 : rs->rs_keyix); 3672 } 3673 } 3674 ifp->if_ierrors++; 3675rx_error: 3676 /* 3677 * Cleanup any pending partial frame. 3678 */ 3679 if (sc->sc_rxpending != NULL) { 3680 m_freem(sc->sc_rxpending); 3681 sc->sc_rxpending = NULL; 3682 } 3683 /* 3684 * When a tap is present pass error frames 3685 * that have been requested. By default we 3686 * pass decrypt+mic errors but others may be 3687 * interesting (e.g. crc). 3688 */ 3689 if (ieee80211_radiotap_active(ic) && 3690 (rs->rs_status & sc->sc_monpass)) { 3691 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3692 BUS_DMASYNC_POSTREAD); 3693 /* NB: bpf needs the mbuf length setup */ 3694 len = rs->rs_datalen; 3695 m->m_pkthdr.len = m->m_len = len; 3696 bf->bf_m = NULL; 3697 ath_rx_tap(ifp, m, rs, tsf, nf); 3698 ieee80211_radiotap_rx_all(ic, m); 3699 m_freem(m); 3700 } 3701 /* XXX pass MIC errors up for s/w reclaculation */ 3702 goto rx_next; 3703 } 3704rx_accept: 3705 /* 3706 * Sync and unmap the frame. 
At this point we're 3707 * committed to passing the mbuf somewhere so clear 3708 * bf_m; this means a new mbuf must be allocated 3709 * when the rx descriptor is setup again to receive 3710 * another frame. 3711 */ 3712 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3713 BUS_DMASYNC_POSTREAD); 3714 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3715 bf->bf_m = NULL; 3716 3717 len = rs->rs_datalen; 3718 m->m_len = len; 3719 3720 if (rs->rs_more) { 3721 /* 3722 * Frame spans multiple descriptors; save 3723 * it for the next completed descriptor, it 3724 * will be used to construct a jumbogram. 3725 */ 3726 if (sc->sc_rxpending != NULL) { 3727 /* NB: max frame size is currently 2 clusters */ 3728 sc->sc_stats.ast_rx_toobig++; 3729 m_freem(sc->sc_rxpending); 3730 } 3731 m->m_pkthdr.rcvif = ifp; 3732 m->m_pkthdr.len = len; 3733 sc->sc_rxpending = m; 3734 goto rx_next; 3735 } else if (sc->sc_rxpending != NULL) { 3736 /* 3737 * This is the second part of a jumbogram, 3738 * chain it to the first mbuf, adjust the 3739 * frame length, and clear the rxpending state. 3740 */ 3741 sc->sc_rxpending->m_next = m; 3742 sc->sc_rxpending->m_pkthdr.len += len; 3743 m = sc->sc_rxpending; 3744 sc->sc_rxpending = NULL; 3745 } else { 3746 /* 3747 * Normal single-descriptor receive; setup 3748 * the rcvif and packet length. 3749 */ 3750 m->m_pkthdr.rcvif = ifp; 3751 m->m_pkthdr.len = len; 3752 } 3753 3754 ifp->if_ipackets++; 3755 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; 3756 3757 /* 3758 * Populate the rx status block. When there are bpf 3759 * listeners we do the additional work to provide 3760 * complete status. Otherwise we fill in only the 3761 * material required by ieee80211_input. Note that 3762 * noise setting is filled in above. 3763 */ 3764 if (ieee80211_radiotap_active(ic)) 3765 ath_rx_tap(ifp, m, rs, tsf, nf); 3766 3767 /* 3768 * From this point on we assume the frame is at least 3769 * as large as ieee80211_frame_min; verify that. 
3770 */ 3771 if (len < IEEE80211_MIN_LEN) { 3772 if (!ieee80211_radiotap_active(ic)) { 3773 DPRINTF(sc, ATH_DEBUG_RECV, 3774 "%s: short packet %d\n", __func__, len); 3775 sc->sc_stats.ast_rx_tooshort++; 3776 } else { 3777 /* NB: in particular this captures ack's */ 3778 ieee80211_radiotap_rx_all(ic, m); 3779 } 3780 m_freem(m); 3781 goto rx_next; 3782 } 3783 3784 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { 3785 const HAL_RATE_TABLE *rt = sc->sc_currates; 3786 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; 3787 3788 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, 3789 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); 3790 } 3791 3792 m_adj(m, -IEEE80211_CRC_LEN); 3793 3794 /* 3795 * Locate the node for sender, track state, and then 3796 * pass the (referenced) node up to the 802.11 layer 3797 * for its use. 3798 */ 3799 ni = ieee80211_find_rxnode_withkey(ic, 3800 mtod(m, const struct ieee80211_frame_min *), 3801 rs->rs_keyix == HAL_RXKEYIX_INVALID ? 3802 IEEE80211_KEYIX_NONE : rs->rs_keyix); 3803 sc->sc_lastrs = rs; 3804 3805 if (rs->rs_isaggr) 3806 sc->sc_stats.ast_rx_agg++; 3807 3808 if (ni != NULL) { 3809 /* 3810 * Only punt packets for ampdu reorder processing for 3811 * 11n nodes; net80211 enforces that M_AMPDU is only 3812 * set for 11n nodes. 3813 */ 3814 if (ni->ni_flags & IEEE80211_NODE_HT) 3815 m->m_flags |= M_AMPDU; 3816 3817 /* 3818 * Sending station is known, dispatch directly. 3819 */ 3820 type = ieee80211_input(ni, m, rs->rs_rssi, nf); 3821 ieee80211_free_node(ni); 3822 /* 3823 * Arrange to update the last rx timestamp only for 3824 * frames from our ap when operating in station mode. 3825 * This assumes the rx key is always setup when 3826 * associated. 3827 */ 3828 if (ic->ic_opmode == IEEE80211_M_STA && 3829 rs->rs_keyix != HAL_RXKEYIX_INVALID) 3830 ngood++; 3831 } else { 3832 type = ieee80211_input_all(ic, m, rs->rs_rssi, nf); 3833 } 3834 /* 3835 * Track rx rssi and do any rx antenna management. 
3836 */ 3837 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); 3838 if (sc->sc_diversity) { 3839 /* 3840 * When using fast diversity, change the default rx 3841 * antenna if diversity chooses the other antenna 3 3842 * times in a row. 3843 */ 3844 if (sc->sc_defant != rs->rs_antenna) { 3845 if (++sc->sc_rxotherant >= 3) 3846 ath_setdefantenna(sc, rs->rs_antenna); 3847 } else 3848 sc->sc_rxotherant = 0; 3849 } 3850 3851 /* Newer school diversity - kite specific for now */ 3852 /* XXX perhaps migrate the normal diversity code to this? */ 3853 if ((ah)->ah_rxAntCombDiversity) 3854 (*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz); 3855 3856 if (sc->sc_softled) { 3857 /* 3858 * Blink for any data frame. Otherwise do a 3859 * heartbeat-style blink when idle. The latter 3860 * is mainly for station mode where we depend on 3861 * periodic beacon frames to trigger the poll event. 3862 */ 3863 if (type == IEEE80211_FC0_TYPE_DATA) { 3864 const HAL_RATE_TABLE *rt = sc->sc_currates; 3865 ath_led_event(sc, 3866 rt->rateCodeToIndex[rs->rs_rate]); 3867 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) 3868 ath_led_event(sc, 0); 3869 } 3870rx_next: 3871 TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); 3872 } while (ath_rxbuf_init(sc, bf) == 0); 3873 3874 /* rx signal state monitoring */ 3875 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); 3876 if (ngood) 3877 sc->sc_lastrx = tsf; 3878 3879 /* Queue DFS tasklet if needed */ 3880 if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) 3881 taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); 3882 3883 /* 3884 * Now that all the RX frames were handled that 3885 * need to be handled, kick the PCU if there's 3886 * been an RXEOL condition. 3887 */ 3888 if (resched && sc->sc_kickpcu) { 3889 device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", 3890 __func__, npkts); 3891 3892 /* XXX rxslink? 
*/ 3893 bf = TAILQ_FIRST(&sc->sc_rxbuf); 3894 ath_hal_putrxbuf(ah, bf->bf_daddr); 3895 ath_hal_rxena(ah); /* enable recv descriptors */ 3896 ath_mode_init(sc); /* set filters, etc. */ 3897 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 3898 3899 ATH_LOCK(sc); 3900 ath_hal_intrset(ah, sc->sc_imask); 3901 sc->sc_kickpcu = 0; 3902 ATH_UNLOCK(sc); 3903 } 3904 3905 if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 3906#ifdef IEEE80211_SUPPORT_SUPERG 3907 ieee80211_ff_age_all(ic, 100); 3908#endif 3909 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 3910 ath_start(ifp); 3911 } 3912#undef PA2DESC 3913} 3914 3915static void 3916ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3917{ 3918 txq->axq_qnum = qnum; 3919 txq->axq_ac = 0; 3920 txq->axq_depth = 0; 3921 txq->axq_intrcnt = 0; 3922 txq->axq_link = NULL; 3923 txq->axq_softc = sc; 3924 TAILQ_INIT(&txq->axq_q); 3925 TAILQ_INIT(&txq->axq_tidq); 3926 ATH_TXQ_LOCK_INIT(sc, txq); 3927} 3928 3929/* 3930 * Setup a h/w transmit queue. 3931 */ 3932static struct ath_txq * 3933ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3934{ 3935#define N(a) (sizeof(a)/sizeof(a[0])) 3936 struct ath_hal *ah = sc->sc_ah; 3937 HAL_TXQ_INFO qi; 3938 int qnum; 3939 3940 memset(&qi, 0, sizeof(qi)); 3941 qi.tqi_subtype = subtype; 3942 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3943 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3944 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3945 /* 3946 * Enable interrupts only for EOL and DESC conditions. 3947 * We mark tx descriptors to receive a DESC interrupt 3948 * when a tx queue gets deep; otherwise waiting for the 3949 * EOL to reap descriptors. Note that this is done to 3950 * reduce interrupt load and this only defers reaping 3951 * descriptors, never transmitting frames. Aside from 3952 * reducing interrupts this also permits more concurrency. 
3953 * The only potential downside is if the tx queue backs 3954 * up in which case the top half of the kernel may backup 3955 * due to a lack of tx descriptors. 3956 */ 3957 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; 3958 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3959 if (qnum == -1) { 3960 /* 3961 * NB: don't print a message, this happens 3962 * normally on parts with too few tx queues 3963 */ 3964 return NULL; 3965 } 3966 if (qnum >= N(sc->sc_txq)) { 3967 device_printf(sc->sc_dev, 3968 "hal qnum %u out of range, max %zu!\n", 3969 qnum, N(sc->sc_txq)); 3970 ath_hal_releasetxqueue(ah, qnum); 3971 return NULL; 3972 } 3973 if (!ATH_TXQ_SETUP(sc, qnum)) { 3974 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3975 sc->sc_txqsetup |= 1<<qnum; 3976 } 3977 return &sc->sc_txq[qnum]; 3978#undef N 3979} 3980 3981/* 3982 * Setup a hardware data transmit queue for the specified 3983 * access control. The hal may not support all requested 3984 * queues in which case it will return a reference to a 3985 * previously setup queue. We record the mapping from ac's 3986 * to h/w queues for use by ath_tx_start and also track 3987 * the set of h/w queues being used to optimize work in the 3988 * transmit interrupt handler and related routines. 3989 */ 3990static int 3991ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3992{ 3993#define N(a) (sizeof(a)/sizeof(a[0])) 3994 struct ath_txq *txq; 3995 3996 if (ac >= N(sc->sc_ac2q)) { 3997 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3998 ac, N(sc->sc_ac2q)); 3999 return 0; 4000 } 4001 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 4002 if (txq != NULL) { 4003 txq->axq_ac = ac; 4004 sc->sc_ac2q[ac] = txq; 4005 return 1; 4006 } else 4007 return 0; 4008#undef N 4009} 4010 4011/* 4012 * Update WME parameters for a transmit queue. 
4013 */ 4014static int 4015ath_txq_update(struct ath_softc *sc, int ac) 4016{ 4017#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 4018#define ATH_TXOP_TO_US(v) (v<<5) 4019 struct ifnet *ifp = sc->sc_ifp; 4020 struct ieee80211com *ic = ifp->if_l2com; 4021 struct ath_txq *txq = sc->sc_ac2q[ac]; 4022 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 4023 struct ath_hal *ah = sc->sc_ah; 4024 HAL_TXQ_INFO qi; 4025 4026 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 4027#ifdef IEEE80211_SUPPORT_TDMA 4028 if (sc->sc_tdma) { 4029 /* 4030 * AIFS is zero so there's no pre-transmit wait. The 4031 * burst time defines the slot duration and is configured 4032 * through net80211. The QCU is setup to not do post-xmit 4033 * back off, lockout all lower-priority QCU's, and fire 4034 * off the DMA beacon alert timer which is setup based 4035 * on the slot configuration. 4036 */ 4037 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4038 | HAL_TXQ_TXERRINT_ENABLE 4039 | HAL_TXQ_TXURNINT_ENABLE 4040 | HAL_TXQ_TXEOLINT_ENABLE 4041 | HAL_TXQ_DBA_GATED 4042 | HAL_TXQ_BACKOFF_DISABLE 4043 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 4044 ; 4045 qi.tqi_aifs = 0; 4046 /* XXX +dbaprep? 
*/ 4047 qi.tqi_readyTime = sc->sc_tdmaslotlen; 4048 qi.tqi_burstTime = qi.tqi_readyTime; 4049 } else { 4050#endif 4051 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 4052 | HAL_TXQ_TXERRINT_ENABLE 4053 | HAL_TXQ_TXDESCINT_ENABLE 4054 | HAL_TXQ_TXURNINT_ENABLE 4055 ; 4056 qi.tqi_aifs = wmep->wmep_aifsn; 4057 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 4058 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 4059 qi.tqi_readyTime = 0; 4060 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 4061#ifdef IEEE80211_SUPPORT_TDMA 4062 } 4063#endif 4064 4065 DPRINTF(sc, ATH_DEBUG_RESET, 4066 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 4067 __func__, txq->axq_qnum, qi.tqi_qflags, 4068 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 4069 4070 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 4071 if_printf(ifp, "unable to update hardware queue " 4072 "parameters for %s traffic!\n", 4073 ieee80211_wme_acnames[ac]); 4074 return 0; 4075 } else { 4076 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4077 return 1; 4078 } 4079#undef ATH_TXOP_TO_US 4080#undef ATH_EXPONENT_TO_VALUE 4081} 4082 4083/* 4084 * Callback from the 802.11 layer to update WME parameters. 4085 */ 4086static int 4087ath_wme_update(struct ieee80211com *ic) 4088{ 4089 struct ath_softc *sc = ic->ic_ifp->if_softc; 4090 4091 return !ath_txq_update(sc, WME_AC_BE) || 4092 !ath_txq_update(sc, WME_AC_BK) || 4093 !ath_txq_update(sc, WME_AC_VI) || 4094 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4095} 4096 4097/* 4098 * Reclaim resources for a setup queue. 4099 */ 4100static void 4101ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4102{ 4103 4104 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4105 ATH_TXQ_LOCK_DESTROY(txq); 4106 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4107} 4108 4109/* 4110 * Reclaim all tx queue resources. 
 */
static void
ath_tx_cleanup(struct ath_softc *sc)
{
	int i;

	ATH_TXBUF_LOCK_DESTROY(sc);
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}

/*
 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
 * using the current rates in sc_rixmap.
 */
int
ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
{
	int rix = sc->sc_rixmap[rate];
	/* NB: return lowest rix for invalid rate */
	return (rix == 0xff ? 0 : rix);
}

/*
 * Process completed xmit descriptors from the specified queue.
 * Returns the number of frames that were ack'd (used by callers
 * to refresh sc_lastrx as a phantom-bmiss workaround).
 * NB(review): the 'dosched' parameter is not referenced in this
 * function body — presumably reserved for the TX scheduler; confirm.
 */
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_buf *bf, *last;
	struct ath_desc *ds, *ds0;
	struct ath_tx_status *ts;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, pri, nacked;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
		__func__, txq->axq_qnum,
		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
	nacked = 0;
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = TAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* NB: status lives in the last descriptor of the frame */
		ds0 = &bf->bf_desc[0];
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		ts = &bf->bf_status.ds_txstat;
		status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
			    status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			/* h/w hasn't finished this frame; stop here */
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE(txq, bf, bf_list);
#ifdef IEEE80211_SUPPORT_TDMA
		if (txq->axq_depth > 0) {
			/*
			 * More frames follow.  Mark the buffer busy
			 * so it's not re-used while the hardware may
			 * still re-read the link field in the descriptor.
			 */
			bf->bf_flags |= ATH_BUF_BUSY;
		} else
#else
		if (txq->axq_depth == 0)
#endif
			txq->axq_link = NULL;
		ATH_TXQ_UNLOCK(txq);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = ATH_NODE(ni);
			if (ts->ts_status == 0) {
				/* frame transmitted ok; gather statistics */
				u_int8_t txant = ts->ts_antenna;
				sc->sc_stats.ast_ant_tx[txant]++;
				sc->sc_ant_tx[txant]++;
				if (ts->ts_finaltsi != 0)
					sc->sc_stats.ast_tx_altrate++;
				pri = M_WME_GETAC(bf->bf_m);
				if (pri >= WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
					ni->ni_inact = ni->ni_inact_reload;
			} else {
				if (ts->ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
				if (ts->ts_status & HAL_TXERR_XTXOP)
					sc->sc_stats.ast_tx_xtxop++;
				if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
					sc->sc_stats.ast_tx_timerexpired++;

				/* XXX HAL_TX_DATA_UNDERRUN */
				/* XXX HAL_TX_DELIM_UNDERRUN */

				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.ast_ff_txerr++;
			}
			/* XXX when is this valid? */
			if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
				sc->sc_stats.ast_tx_desccfgerr++;

			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update statistics,
				 * including the last rx time used to
				 * workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
						ts->ts_rssi);
				}
				ath_rate_tx_complete(sc, an, bf);
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m,
				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
					ts->ts_status : HAL_TXERR_XRETRY);
			ieee80211_free_node(ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		/* Return the buffer to the free list; clear the busy
		 * marker on the previous tail now that it's no longer
		 * the last descriptor the h/w can re-read. */
		ATH_TXBUF_LOCK(sc);
		last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
		if (last != NULL)
			last->bf_flags &= ~ATH_BUF_BUSY;
		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * Flush fast-frame staging queue when traffic slows.
	 */
	if (txq->axq_depth <= 1)
		ieee80211_ff_flush(ic, txq->axq_ac);
#endif
	return nacked;
}

/* Test whether h/w queue q is flagged active in bitmask t. */
#define	TXQACTIVE(t, q)		( (t) & (1 << (q)))

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t txqs;

	/* Atomically snapshot and clear the set of active queues. */
	ATH_LOCK(sc);
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_UNLOCK(sc);

	if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
		/* XXX why is lastrx updated in tx code? */
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	/* Kick the output path now that buffers are available. */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int nacked;
	uint32_t txqs;

	/* Atomically snapshot and clear the set of active queues. */
	ATH_LOCK(sc);
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	if (TXQACTIVE(txqs, 0))
		nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
	if (TXQACTIVE(txqs, 1))
		nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
	if (TXQACTIVE(txqs, 2))
		nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
	if (TXQACTIVE(txqs, 3))
		nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
	if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
		ath_tx_processq(sc, sc->sc_cabq, 1);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	/* Kick the output path now that buffers are available. */
	ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 * General form: walk every configured h/w queue.
 */
static void
ath_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int i, nacked;
	uint32_t txqs;

	/* Atomically snapshot and clear the set of active queues. */
	ATH_LOCK(sc);
	txqs = sc->sc_txq_active;
	sc->sc_txq_active &= ~txqs;
	ATH_UNLOCK(sc);

	/*
	 * Process each active queue.
	 */
	nacked = 0;
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
			nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
	if (nacked)
		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;

	if (sc->sc_softled)
		ath_led_event(sc, sc->sc_txrix);

	/* Kick the output path now that buffers are available. */
	ath_start(ifp);
}

/*
 * Free every frame still pending on a tx queue, invoking any
 * M_TXCB completion callbacks (with -1 status) and releasing
 * node references before returning buffers to the free list.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_proc
	 */
	ATH_TXBUF_LOCK(sc);
	bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = TAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE(txq, bf, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
			    ath_hal_txprocdesc(ah, bf->bf_desc,
				&bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
			    bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Do any callback and reclaim the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m, -1);
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_flags &= ~ATH_BUF_BUSY;

		ATH_TXBUF_LOCK(sc);
		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}

/*
 * Ask the h/w to stop DMA on a single tx queue (best effort;
 * the return value of the HAL call is deliberately ignored).
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
		    __func__, sc->sc_bhalq,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
		    NULL);
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}
	/* Reclaim s/w state for every configured queue. */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i]);
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ifp->if_l2com,
			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
			    0, -1);
		}
	}
#endif /* ATH_DEBUG */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath_stoprecv(struct ath_softc *sc)
{
/* Map a descriptor's physical address back to its virtual address. */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	DELAY(3000);			/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		printf("%s: rx queue %p, link %p\n", __func__,
			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		ix = 0;
		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	/* Drop any partially-assembled rx frame. */
	if (sc->sc_rxpending != NULL) {
		m_freem(sc->sc_rxpending);
		sc->sc_rxpending = NULL;
	}
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}

/*
 * Enable the receive h/w following a reset.
4556 */ 4557static int 4558ath_startrecv(struct ath_softc *sc) 4559{ 4560 struct ath_hal *ah = sc->sc_ah; 4561 struct ath_buf *bf; 4562 4563 sc->sc_rxlink = NULL; 4564 sc->sc_rxpending = NULL; 4565 TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { 4566 int error = ath_rxbuf_init(sc, bf); 4567 if (error != 0) { 4568 DPRINTF(sc, ATH_DEBUG_RECV, 4569 "%s: ath_rxbuf_init failed %d\n", 4570 __func__, error); 4571 return error; 4572 } 4573 } 4574 4575 bf = TAILQ_FIRST(&sc->sc_rxbuf); 4576 ath_hal_putrxbuf(ah, bf->bf_daddr); 4577 ath_hal_rxena(ah); /* enable recv descriptors */ 4578 ath_mode_init(sc); /* set filters, etc. */ 4579 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ 4580 return 0; 4581} 4582 4583/* 4584 * Update internal state after a channel change. 4585 */ 4586static void 4587ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) 4588{ 4589 enum ieee80211_phymode mode; 4590 4591 /* 4592 * Change channels and update the h/w rate map 4593 * if we're switching; e.g. 11a to 11b/g. 4594 */ 4595 mode = ieee80211_chan2mode(chan); 4596 if (mode != sc->sc_curmode) 4597 ath_setcurmode(sc, mode); 4598 sc->sc_curchan = chan; 4599} 4600 4601/* 4602 * Set/change channels. If the channel is really being changed, 4603 * it's done by resetting the chip. To accomplish this we must 4604 * first cleanup any pending DMA, then restart stuff after a la 4605 * ath_init. 
4606 */ 4607static int 4608ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) 4609{ 4610 struct ifnet *ifp = sc->sc_ifp; 4611 struct ieee80211com *ic = ifp->if_l2com; 4612 struct ath_hal *ah = sc->sc_ah; 4613 4614 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", 4615 __func__, ieee80211_chan2ieee(ic, chan), 4616 chan->ic_freq, chan->ic_flags); 4617 if (chan != sc->sc_curchan) { 4618 HAL_STATUS status; 4619 /* 4620 * To switch channels clear any pending DMA operations; 4621 * wait long enough for the RX fifo to drain, reset the 4622 * hardware at the new frequency, and then re-enable 4623 * the relevant bits of the h/w. 4624 */ 4625 ath_hal_intrset(ah, 0); /* disable interrupts */ 4626 ath_draintxq(sc); /* clear pending tx frames */ 4627 ath_stoprecv(sc); /* turn off frame recv */ 4628 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { 4629 if_printf(ifp, "%s: unable to reset " 4630 "channel %u (%u MHz, flags 0x%x), hal status %u\n", 4631 __func__, ieee80211_chan2ieee(ic, chan), 4632 chan->ic_freq, chan->ic_flags, status); 4633 return EIO; 4634 } 4635 sc->sc_diversity = ath_hal_getdiversity(ah); 4636 4637 /* Let DFS at it in case it's a DFS channel */ 4638 ath_dfs_radar_enable(sc, ic->ic_curchan); 4639 4640 /* 4641 * Re-enable rx framework. 4642 */ 4643 if (ath_startrecv(sc) != 0) { 4644 if_printf(ifp, "%s: unable to restart recv logic\n", 4645 __func__); 4646 return EIO; 4647 } 4648 4649 /* 4650 * Change channels and update the h/w rate map 4651 * if we're switching; e.g. 11a to 11b/g. 4652 */ 4653 ath_chan_change(sc, chan); 4654 4655 /* 4656 * Reset clears the beacon timers; reset them 4657 * here if needed. 4658 */ 4659 if (sc->sc_beacons) { /* restart beacons */ 4660#ifdef IEEE80211_SUPPORT_TDMA 4661 if (sc->sc_tdma) 4662 ath_tdma_config(sc, NULL); 4663 else 4664#endif 4665 ath_beacon_config(sc, NULL); 4666 } 4667 4668 /* 4669 * Re-enable interrupts. 
4670 */ 4671 ath_hal_intrset(ah, sc->sc_imask); 4672 } 4673 return 0; 4674} 4675 4676/* 4677 * Periodically recalibrate the PHY to account 4678 * for temperature/environment changes. 4679 */ 4680static void 4681ath_calibrate(void *arg) 4682{ 4683 struct ath_softc *sc = arg; 4684 struct ath_hal *ah = sc->sc_ah; 4685 struct ifnet *ifp = sc->sc_ifp; 4686 struct ieee80211com *ic = ifp->if_l2com; 4687 HAL_BOOL longCal, isCalDone; 4688 HAL_BOOL aniCal, shortCal = AH_FALSE; 4689 int nextcal; 4690 4691 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 4692 goto restart; 4693 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4694 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4695 if (sc->sc_doresetcal) 4696 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4697 4698 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4699 if (aniCal) { 4700 sc->sc_stats.ast_ani_cal++; 4701 sc->sc_lastani = ticks; 4702 ath_hal_ani_poll(ah, sc->sc_curchan); 4703 } 4704 4705 if (longCal) { 4706 sc->sc_stats.ast_per_cal++; 4707 sc->sc_lastlongcal = ticks; 4708 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4709 /* 4710 * Rfgain is out of bounds, reset the chip 4711 * to load new gain values. 4712 */ 4713 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4714 "%s: rfgain change\n", __func__); 4715 sc->sc_stats.ast_per_rfgain++; 4716 ath_reset(ifp); 4717 } 4718 /* 4719 * If this long cal is after an idle period, then 4720 * reset the data collection state so we start fresh. 
4721 */ 4722 if (sc->sc_resetcal) { 4723 (void) ath_hal_calreset(ah, sc->sc_curchan); 4724 sc->sc_lastcalreset = ticks; 4725 sc->sc_lastshortcal = ticks; 4726 sc->sc_resetcal = 0; 4727 sc->sc_doresetcal = AH_TRUE; 4728 } 4729 } 4730 4731 /* Only call if we're doing a short/long cal, not for ANI calibration */ 4732 if (shortCal || longCal) { 4733 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 4734 if (longCal) { 4735 /* 4736 * Calibrate noise floor data again in case of change. 4737 */ 4738 ath_hal_process_noisefloor(ah); 4739 } 4740 } else { 4741 DPRINTF(sc, ATH_DEBUG_ANY, 4742 "%s: calibration of channel %u failed\n", 4743 __func__, sc->sc_curchan->ic_freq); 4744 sc->sc_stats.ast_per_calfail++; 4745 } 4746 if (shortCal) 4747 sc->sc_lastshortcal = ticks; 4748 } 4749 if (!isCalDone) { 4750restart: 4751 /* 4752 * Use a shorter interval to potentially collect multiple 4753 * data samples required to complete calibration. Once 4754 * we're told the work is done we drop back to a longer 4755 * interval between requests. We're more aggressive doing 4756 * work when operating as an AP to improve operation right 4757 * after startup. 
4758 */ 4759 sc->sc_lastshortcal = ticks; 4760 nextcal = ath_shortcalinterval*hz/1000; 4761 if (sc->sc_opmode != HAL_M_HOSTAP) 4762 nextcal *= 10; 4763 sc->sc_doresetcal = AH_TRUE; 4764 } else { 4765 /* nextcal should be the shortest time for next event */ 4766 nextcal = ath_longcalinterval*hz; 4767 if (sc->sc_lastcalreset == 0) 4768 sc->sc_lastcalreset = sc->sc_lastlongcal; 4769 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 4770 sc->sc_resetcal = 1; /* setup reset next trip */ 4771 sc->sc_doresetcal = AH_FALSE; 4772 } 4773 /* ANI calibration may occur more often than short/long/resetcal */ 4774 if (ath_anicalinterval > 0) 4775 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4776 4777 if (nextcal != 0) { 4778 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 4779 __func__, nextcal, isCalDone ? "" : "!"); 4780 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 4781 } else { 4782 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 4783 __func__); 4784 /* NB: don't rearm timer */ 4785 } 4786} 4787 4788static void 4789ath_scan_start(struct ieee80211com *ic) 4790{ 4791 struct ifnet *ifp = ic->ic_ifp; 4792 struct ath_softc *sc = ifp->if_softc; 4793 struct ath_hal *ah = sc->sc_ah; 4794 u_int32_t rfilt; 4795 4796 /* XXX calibration timer? 
*/ 4797 4798 sc->sc_scanning = 1; 4799 sc->sc_syncbeacon = 0; 4800 rfilt = ath_calcrxfilter(sc); 4801 ath_hal_setrxfilter(ah, rfilt); 4802 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 4803 4804 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 4805 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 4806} 4807 4808static void 4809ath_scan_end(struct ieee80211com *ic) 4810{ 4811 struct ifnet *ifp = ic->ic_ifp; 4812 struct ath_softc *sc = ifp->if_softc; 4813 struct ath_hal *ah = sc->sc_ah; 4814 u_int32_t rfilt; 4815 4816 sc->sc_scanning = 0; 4817 rfilt = ath_calcrxfilter(sc); 4818 ath_hal_setrxfilter(ah, rfilt); 4819 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4820 4821 ath_hal_process_noisefloor(ah); 4822 4823 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4824 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 4825 sc->sc_curaid); 4826} 4827 4828static void 4829ath_set_channel(struct ieee80211com *ic) 4830{ 4831 struct ifnet *ifp = ic->ic_ifp; 4832 struct ath_softc *sc = ifp->if_softc; 4833 4834 (void) ath_chan_set(sc, ic->ic_curchan); 4835 /* 4836 * If we are returning to our bss channel then mark state 4837 * so the next recv'd beacon's tsf will be used to sync the 4838 * beacon timers. Note that since we only hear beacons in 4839 * sta/ibss mode this has no effect in other operating modes. 4840 */ 4841 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 4842 sc->sc_syncbeacon = 1; 4843} 4844 4845/* 4846 * Walk the vap list and check if there any vap's in RUN state. 
4847 */ 4848static int 4849ath_isanyrunningvaps(struct ieee80211vap *this) 4850{ 4851 struct ieee80211com *ic = this->iv_ic; 4852 struct ieee80211vap *vap; 4853 4854 IEEE80211_LOCK_ASSERT(ic); 4855 4856 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4857 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4858 return 1; 4859 } 4860 return 0; 4861} 4862 4863static int 4864ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4865{ 4866 struct ieee80211com *ic = vap->iv_ic; 4867 struct ath_softc *sc = ic->ic_ifp->if_softc; 4868 struct ath_vap *avp = ATH_VAP(vap); 4869 struct ath_hal *ah = sc->sc_ah; 4870 struct ieee80211_node *ni = NULL; 4871 int i, error, stamode; 4872 u_int32_t rfilt; 4873 int csa_run_transition = 0; 4874 static const HAL_LED_STATE leds[] = { 4875 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4876 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4877 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4878 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4879 HAL_LED_RUN, /* IEEE80211_S_CAC */ 4880 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4881 HAL_LED_RUN, /* IEEE80211_S_CSA */ 4882 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 4883 }; 4884 4885 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4886 ieee80211_state_name[vap->iv_state], 4887 ieee80211_state_name[nstate]); 4888 4889 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4890 csa_run_transition = 1; 4891 4892 callout_drain(&sc->sc_cal_ch); 4893 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4894 4895 if (nstate == IEEE80211_S_SCAN) { 4896 /* 4897 * Scanning: turn off beacon miss and don't beacon. 4898 * Mark beacon state so when we reach RUN state we'll 4899 * [re]setup beacons. Unblock the task q thread so 4900 * deferred interrupt processing is done. 
4901 */ 4902 ath_hal_intrset(ah, 4903 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4904 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4905 sc->sc_beacons = 0; 4906 taskqueue_unblock(sc->sc_tq); 4907 } 4908 4909 ni = vap->iv_bss; 4910 rfilt = ath_calcrxfilter(sc); 4911 stamode = (vap->iv_opmode == IEEE80211_M_STA || 4912 vap->iv_opmode == IEEE80211_M_AHDEMO || 4913 vap->iv_opmode == IEEE80211_M_IBSS); 4914 if (stamode && nstate == IEEE80211_S_RUN) { 4915 sc->sc_curaid = ni->ni_associd; 4916 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4917 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4918 } 4919 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4920 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 4921 ath_hal_setrxfilter(ah, rfilt); 4922 4923 /* XXX is this to restore keycache on resume? */ 4924 if (vap->iv_opmode != IEEE80211_M_STA && 4925 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 4926 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4927 if (ath_hal_keyisvalid(ah, i)) 4928 ath_hal_keysetmac(ah, i, ni->ni_bssid); 4929 } 4930 4931 /* 4932 * Invoke the parent method to do net80211 work. 4933 */ 4934 error = avp->av_newstate(vap, nstate, arg); 4935 if (error != 0) 4936 goto bad; 4937 4938 if (nstate == IEEE80211_S_RUN) { 4939 /* NB: collect bss node again, it may have changed */ 4940 ni = vap->iv_bss; 4941 4942 DPRINTF(sc, ATH_DEBUG_STATE, 4943 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4944 "capinfo 0x%04x chan %d\n", __func__, 4945 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4946 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4947 4948 switch (vap->iv_opmode) { 4949#ifdef IEEE80211_SUPPORT_TDMA 4950 case IEEE80211_M_AHDEMO: 4951 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 4952 break; 4953 /* fall thru... */ 4954#endif 4955 case IEEE80211_M_HOSTAP: 4956 case IEEE80211_M_IBSS: 4957 case IEEE80211_M_MBSS: 4958 /* 4959 * Allocate and setup the beacon frame. 
4960 * 4961 * Stop any previous beacon DMA. This may be 4962 * necessary, for example, when an ibss merge 4963 * causes reconfiguration; there will be a state 4964 * transition from RUN->RUN that means we may 4965 * be called with beacon transmission active. 4966 */ 4967 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4968 4969 error = ath_beacon_alloc(sc, ni); 4970 if (error != 0) 4971 goto bad; 4972 /* 4973 * If joining an adhoc network defer beacon timer 4974 * configuration to the next beacon frame so we 4975 * have a current TSF to use. Otherwise we're 4976 * starting an ibss/bss so there's no need to delay; 4977 * if this is the first vap moving to RUN state, then 4978 * beacon state needs to be [re]configured. 4979 */ 4980 if (vap->iv_opmode == IEEE80211_M_IBSS && 4981 ni->ni_tstamp.tsf != 0) { 4982 sc->sc_syncbeacon = 1; 4983 } else if (!sc->sc_beacons) { 4984#ifdef IEEE80211_SUPPORT_TDMA 4985 if (vap->iv_caps & IEEE80211_C_TDMA) 4986 ath_tdma_config(sc, vap); 4987 else 4988#endif 4989 ath_beacon_config(sc, vap); 4990 sc->sc_beacons = 1; 4991 } 4992 break; 4993 case IEEE80211_M_STA: 4994 /* 4995 * Defer beacon timer configuration to the next 4996 * beacon frame so we have a current TSF to use 4997 * (any TSF collected when scanning is likely old). 4998 * However if it's due to a CSA -> RUN transition, 4999 * force a beacon update so we pick up a lack of 5000 * beacons from an AP in CAC and thus force a 5001 * scan. 5002 */ 5003 sc->sc_syncbeacon = 1; 5004 if (csa_run_transition) 5005 ath_beacon_config(sc, vap); 5006 break; 5007 case IEEE80211_M_MONITOR: 5008 /* 5009 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5010 * transitions so we must re-enable interrupts here to 5011 * handle the case of a single monitor mode vap. 
5012 */ 5013 ath_hal_intrset(ah, sc->sc_imask); 5014 break; 5015 case IEEE80211_M_WDS: 5016 break; 5017 default: 5018 break; 5019 } 5020 /* 5021 * Let the hal process statistics collected during a 5022 * scan so it can provide calibrated noise floor data. 5023 */ 5024 ath_hal_process_noisefloor(ah); 5025 /* 5026 * Reset rssi stats; maybe not the best place... 5027 */ 5028 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5029 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5030 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5031 /* 5032 * Finally, start any timers and the task q thread 5033 * (in case we didn't go through SCAN state). 5034 */ 5035 if (ath_longcalinterval != 0) { 5036 /* start periodic recalibration timer */ 5037 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5038 } else { 5039 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5040 "%s: calibration disabled\n", __func__); 5041 } 5042 taskqueue_unblock(sc->sc_tq); 5043 } else if (nstate == IEEE80211_S_INIT) { 5044 /* 5045 * If there are no vaps left in RUN state then 5046 * shutdown host/driver operation: 5047 * o disable interrupts 5048 * o disable the task queue thread 5049 * o mark beacon processing as stopped 5050 */ 5051 if (!ath_isanyrunningvaps(vap)) { 5052 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5053 /* disable interrupts */ 5054 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5055 taskqueue_block(sc->sc_tq); 5056 sc->sc_beacons = 0; 5057 } 5058#ifdef IEEE80211_SUPPORT_TDMA 5059 ath_hal_setcca(ah, AH_TRUE); 5060#endif 5061 } 5062bad: 5063 return error; 5064} 5065 5066/* 5067 * Allocate a key cache slot to the station so we can 5068 * setup a mapping from key index to node. The key cache 5069 * slot is needed for managing antenna state and for 5070 * compression when stations do not use crypto. We do 5071 * it uniliaterally here; if crypto is employed this slot 5072 * will be reassigned. 
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	ieee80211_keyix keyix, rxkeyix;

	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
		/*
		 * Key cache is full; we'll fall back to doing
		 * the more expensive lookup in software.  Note
		 * this also means no h/w compression.
		 */
		/* XXX msg+statistic */
	} else {
		/* XXX locking? */
		ni->ni_ucastkey.wk_keyix = keyix;
		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
		/* NB: must mark device key to get called back on delete */
		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
		/* NB: this will create a pass-thru key entry */
		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
	}
}

/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're called also on a re-associate, the isnew
 * param tells us if this is the first time or not.
 */
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	const struct ieee80211_txparam *tp = ni->ni_txparms;

	/* Cache h/w rate indices for the configured mcast/mgmt rates. */
	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);

	ath_rate_newassoc(sc, an, isnew);
	/* First association w/o crypto: give the station a clear-text
	 * key cache slot (see ath_setup_stationkey). */
	if (isnew &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
		ath_setup_stationkey(ni);
}

/*
 * net80211 callback: install a new regulatory domain/channel list
 * into the hal.  Returns EINVAL if the hal rejects the settings.
 */
static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
	int nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: rd %u cc %u location %c%s\n",
	    __func__, reg->regdomain, reg->country, reg->location,
	    reg->ecm ? " ecm" : "");

	status = ath_hal_set_channels(ah, chans, nchans,
	    reg->country, reg->regdomain);
	if (status != HAL_OK) {
		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
		    __func__, status);
		return EINVAL;		/* XXX */
	}

	return 0;
}

/*
 * net80211 callback: report the full channel capability set the
 * radio supports (queried with the debug SKU / default country).
 */
static void
ath_getradiocaps(struct ieee80211com *ic,
	int maxchans, int *nchans, struct ieee80211_channel chans[])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
	    __func__, SKU_DEBUG, CTRY_DEFAULT);

	/* XXX check return */
	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);

}

/*
 * Populate ic_channels and the net80211 regdomain state from the
 * EEPROM-derived channel list at attach time.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}

/* Callout handler: the LED blink cycle has finished. */
static void
ath_led_done(void *arg)
{
	struct ath_softc *sc = arg;

	sc->sc_blinking = 0;
}

/*
 * Turn the LED off: flip the pin and then set a timer so no
 * update will happen for the specified duration.
 */
static void
ath_led_off(void *arg)
{
	struct ath_softc *sc = arg;

	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
}

/*
 * Blink the LED according to the specified on/off times.
5224 */ 5225static void 5226ath_led_blink(struct ath_softc *sc, int on, int off) 5227{ 5228 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); 5229 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); 5230 sc->sc_blinking = 1; 5231 sc->sc_ledoff = off; 5232 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); 5233} 5234 5235static void 5236ath_led_event(struct ath_softc *sc, int rix) 5237{ 5238 sc->sc_ledevent = ticks; /* time of last event */ 5239 if (sc->sc_blinking) /* don't interrupt active blink */ 5240 return; 5241 ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff); 5242} 5243 5244static int 5245ath_rate_setup(struct ath_softc *sc, u_int mode) 5246{ 5247 struct ath_hal *ah = sc->sc_ah; 5248 const HAL_RATE_TABLE *rt; 5249 5250 switch (mode) { 5251 case IEEE80211_MODE_11A: 5252 rt = ath_hal_getratetable(ah, HAL_MODE_11A); 5253 break; 5254 case IEEE80211_MODE_HALF: 5255 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); 5256 break; 5257 case IEEE80211_MODE_QUARTER: 5258 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); 5259 break; 5260 case IEEE80211_MODE_11B: 5261 rt = ath_hal_getratetable(ah, HAL_MODE_11B); 5262 break; 5263 case IEEE80211_MODE_11G: 5264 rt = ath_hal_getratetable(ah, HAL_MODE_11G); 5265 break; 5266 case IEEE80211_MODE_TURBO_A: 5267 rt = ath_hal_getratetable(ah, HAL_MODE_108A); 5268 break; 5269 case IEEE80211_MODE_TURBO_G: 5270 rt = ath_hal_getratetable(ah, HAL_MODE_108G); 5271 break; 5272 case IEEE80211_MODE_STURBO_A: 5273 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); 5274 break; 5275 case IEEE80211_MODE_11NA: 5276 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); 5277 break; 5278 case IEEE80211_MODE_11NG: 5279 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); 5280 break; 5281 default: 5282 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", 5283 __func__, mode); 5284 return 0; 5285 } 5286 sc->sc_rates[mode] = rt; 5287 return (rt != NULL); 5288} 5289 5290static void 
5291ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) 5292{ 5293#define N(a) (sizeof(a)/sizeof(a[0])) 5294 /* NB: on/off times from the Atheros NDIS driver, w/ permission */ 5295 static const struct { 5296 u_int rate; /* tx/rx 802.11 rate */ 5297 u_int16_t timeOn; /* LED on time (ms) */ 5298 u_int16_t timeOff; /* LED off time (ms) */ 5299 } blinkrates[] = { 5300 { 108, 40, 10 }, 5301 { 96, 44, 11 }, 5302 { 72, 50, 13 }, 5303 { 48, 57, 14 }, 5304 { 36, 67, 16 }, 5305 { 24, 80, 20 }, 5306 { 22, 100, 25 }, 5307 { 18, 133, 34 }, 5308 { 12, 160, 40 }, 5309 { 10, 200, 50 }, 5310 { 6, 240, 58 }, 5311 { 4, 267, 66 }, 5312 { 2, 400, 100 }, 5313 { 0, 500, 130 }, 5314 /* XXX half/quarter rates */ 5315 }; 5316 const HAL_RATE_TABLE *rt; 5317 int i, j; 5318 5319 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); 5320 rt = sc->sc_rates[mode]; 5321 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); 5322 for (i = 0; i < rt->rateCount; i++) { 5323 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5324 if (rt->info[i].phy != IEEE80211_T_HT) 5325 sc->sc_rixmap[ieeerate] = i; 5326 else 5327 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; 5328 } 5329 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); 5330 for (i = 0; i < N(sc->sc_hwmap); i++) { 5331 if (i >= rt->rateCount) { 5332 sc->sc_hwmap[i].ledon = (500 * hz) / 1000; 5333 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; 5334 continue; 5335 } 5336 sc->sc_hwmap[i].ieeerate = 5337 rt->info[i].dot11Rate & IEEE80211_RATE_VAL; 5338 if (rt->info[i].phy == IEEE80211_T_HT) 5339 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; 5340 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 5341 if (rt->info[i].shortPreamble || 5342 rt->info[i].phy == IEEE80211_T_OFDM) 5343 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; 5344 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; 5345 for (j = 0; j < N(blinkrates)-1; j++) 5346 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) 5347 break; 5348 /* NB: 
this uses the last entry if the rate isn't found */ 5349 /* XXX beware of overlow */ 5350 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; 5351 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; 5352 } 5353 sc->sc_currates = rt; 5354 sc->sc_curmode = mode; 5355 /* 5356 * All protection frames are transmited at 2Mb/s for 5357 * 11g, otherwise at 1Mb/s. 5358 */ 5359 if (mode == IEEE80211_MODE_11G) 5360 sc->sc_protrix = ath_tx_findrix(sc, 2*2); 5361 else 5362 sc->sc_protrix = ath_tx_findrix(sc, 2*1); 5363 /* NB: caller is responsible for resetting rate control state */ 5364#undef N 5365} 5366 5367static void 5368ath_watchdog(void *arg) 5369{ 5370 struct ath_softc *sc = arg; 5371 5372 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { 5373 struct ifnet *ifp = sc->sc_ifp; 5374 uint32_t hangs; 5375 5376 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && 5377 hangs != 0) { 5378 if_printf(ifp, "%s hang detected (0x%x)\n", 5379 hangs & 0xff ? "bb" : "mac", hangs); 5380 } else 5381 if_printf(ifp, "device timeout\n"); 5382 ath_reset(ifp); 5383 ifp->if_oerrors++; 5384 sc->sc_stats.ast_watchdog++; 5385 } 5386 callout_schedule(&sc->sc_wd_ch, hz); 5387} 5388 5389#ifdef ATH_DIAGAPI 5390/* 5391 * Diagnostic interface to the HAL. This is used by various 5392 * tools to do things like retrieve register contents for 5393 * debugging. The mechanism is intentionally opaque so that 5394 * it can change frequently w/o concern for compatiblity. 5395 */ 5396static int 5397ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) 5398{ 5399 struct ath_hal *ah = sc->sc_ah; 5400 u_int id = ad->ad_id & ATH_DIAG_ID; 5401 void *indata = NULL; 5402 void *outdata = NULL; 5403 u_int32_t insize = ad->ad_in_size; 5404 u_int32_t outsize = ad->ad_out_size; 5405 int error = 0; 5406 5407 if (ad->ad_id & ATH_DIAG_IN) { 5408 /* 5409 * Copy in data. 
5410 */ 5411 indata = malloc(insize, M_TEMP, M_NOWAIT); 5412 if (indata == NULL) { 5413 error = ENOMEM; 5414 goto bad; 5415 } 5416 error = copyin(ad->ad_in_data, indata, insize); 5417 if (error) 5418 goto bad; 5419 } 5420 if (ad->ad_id & ATH_DIAG_DYN) { 5421 /* 5422 * Allocate a buffer for the results (otherwise the HAL 5423 * returns a pointer to a buffer where we can read the 5424 * results). Note that we depend on the HAL leaving this 5425 * pointer for us to use below in reclaiming the buffer; 5426 * may want to be more defensive. 5427 */ 5428 outdata = malloc(outsize, M_TEMP, M_NOWAIT); 5429 if (outdata == NULL) { 5430 error = ENOMEM; 5431 goto bad; 5432 } 5433 } 5434 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { 5435 if (outsize < ad->ad_out_size) 5436 ad->ad_out_size = outsize; 5437 if (outdata != NULL) 5438 error = copyout(outdata, ad->ad_out_data, 5439 ad->ad_out_size); 5440 } else { 5441 error = EINVAL; 5442 } 5443bad: 5444 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) 5445 free(indata, M_TEMP); 5446 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) 5447 free(outdata, M_TEMP); 5448 return error; 5449} 5450#endif /* ATH_DIAGAPI */ 5451 5452static int 5453ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 5454{ 5455#define IS_RUNNING(ifp) \ 5456 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 5457 struct ath_softc *sc = ifp->if_softc; 5458 struct ieee80211com *ic = ifp->if_l2com; 5459 struct ifreq *ifr = (struct ifreq *)data; 5460 const HAL_RATE_TABLE *rt; 5461 int error = 0; 5462 5463 switch (cmd) { 5464 case SIOCSIFFLAGS: 5465 ATH_LOCK(sc); 5466 if (IS_RUNNING(ifp)) { 5467 /* 5468 * To avoid rescanning another access point, 5469 * do not call ath_init() here. Instead, 5470 * only reflect promisc mode settings. 5471 */ 5472 ath_mode_init(sc); 5473 } else if (ifp->if_flags & IFF_UP) { 5474 /* 5475 * Beware of being called during attach/detach 5476 * to reset promiscuous mode. 
In that case we 5477 * will still be marked UP but not RUNNING. 5478 * However trying to re-init the interface 5479 * is the wrong thing to do as we've already 5480 * torn down much of our state. There's 5481 * probably a better way to deal with this. 5482 */ 5483 if (!sc->sc_invalid) 5484 ath_init(sc); /* XXX lose error */ 5485 } else { 5486 ath_stop_locked(ifp); 5487#ifdef notyet 5488 /* XXX must wakeup in places like ath_vap_delete */ 5489 if (!sc->sc_invalid) 5490 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); 5491#endif 5492 } 5493 ATH_UNLOCK(sc); 5494 break; 5495 case SIOCGIFMEDIA: 5496 case SIOCSIFMEDIA: 5497 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 5498 break; 5499 case SIOCGATHSTATS: 5500 /* NB: embed these numbers to get a consistent view */ 5501 sc->sc_stats.ast_tx_packets = ifp->if_opackets; 5502 sc->sc_stats.ast_rx_packets = ifp->if_ipackets; 5503 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); 5504 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); 5505#ifdef IEEE80211_SUPPORT_TDMA 5506 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); 5507 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); 5508#endif 5509 rt = sc->sc_currates; 5510 sc->sc_stats.ast_tx_rate = 5511 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; 5512 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) 5513 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; 5514 return copyout(&sc->sc_stats, 5515 ifr->ifr_data, sizeof (sc->sc_stats)); 5516 case SIOCZATHSTATS: 5517 error = priv_check(curthread, PRIV_DRIVER); 5518 if (error == 0) 5519 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); 5520 break; 5521#ifdef ATH_DIAGAPI 5522 case SIOCGATHDIAG: 5523 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); 5524 break; 5525 case SIOCGATHPHYERR: 5526 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); 5527 break; 5528#endif 5529 case SIOCGIFADDR: 5530 error = ether_ioctl(ifp, cmd, data); 5531 break; 5532 default: 5533 error = 
EINVAL; 5534 break; 5535 } 5536 return error; 5537#undef IS_RUNNING 5538} 5539 5540/* 5541 * Announce various information on device/driver attach. 5542 */ 5543static void 5544ath_announce(struct ath_softc *sc) 5545{ 5546 struct ifnet *ifp = sc->sc_ifp; 5547 struct ath_hal *ah = sc->sc_ah; 5548 5549 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", 5550 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, 5551 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); 5552 if (bootverbose) { 5553 int i; 5554 for (i = 0; i <= WME_AC_VO; i++) { 5555 struct ath_txq *txq = sc->sc_ac2q[i]; 5556 if_printf(ifp, "Use hw queue %u for %s traffic\n", 5557 txq->axq_qnum, ieee80211_wme_acnames[i]); 5558 } 5559 if_printf(ifp, "Use hw queue %u for CAB traffic\n", 5560 sc->sc_cabq->axq_qnum); 5561 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); 5562 } 5563 if (ath_rxbuf != ATH_RXBUF) 5564 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); 5565 if (ath_txbuf != ATH_TXBUF) 5566 if_printf(ifp, "using %u tx buffers\n", ath_txbuf); 5567 if (sc->sc_mcastkey && bootverbose) 5568 if_printf(ifp, "using multicast key search\n"); 5569} 5570 5571#ifdef IEEE80211_SUPPORT_TDMA 5572static void 5573ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval) 5574{ 5575 struct ath_hal *ah = sc->sc_ah; 5576 HAL_BEACON_TIMERS bt; 5577 5578 bt.bt_intval = bintval | HAL_BEACON_ENA; 5579 bt.bt_nexttbtt = nexttbtt; 5580 bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep; 5581 bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep; 5582 bt.bt_nextatim = nexttbtt+1; 5583 /* Enables TBTT, DBA, SWBA timers by default */ 5584 bt.bt_flags = 0; 5585 ath_hal_beaconsettimers(ah, &bt); 5586} 5587 5588/* 5589 * Calculate the beacon interval. This is periodic in the 5590 * superframe for the bss. We assume each station is configured 5591 * identically wrt transmit rate so the guard time we calculate 5592 * above will be the same on all stations. 
Note we need to 5593 * factor in the xmit time because the hardware will schedule 5594 * a frame for transmit if the start of the frame is within 5595 * the burst time. When we get hardware that properly kills 5596 * frames in the PCU we can reduce/eliminate the guard time. 5597 * 5598 * Roundup to 1024 is so we have 1 TU buffer in the guard time 5599 * to deal with the granularity of the nexttbtt timer. 11n MAC's 5600 * with 1us timer granularity should allow us to reduce/eliminate 5601 * this. 5602 */ 5603static void 5604ath_tdma_bintvalsetup(struct ath_softc *sc, 5605 const struct ieee80211_tdma_state *tdma) 5606{ 5607 /* copy from vap state (XXX check all vaps have same value?) */ 5608 sc->sc_tdmaslotlen = tdma->tdma_slotlen; 5609 5610 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) * 5611 tdma->tdma_slotcnt, 1024); 5612 sc->sc_tdmabintval >>= 10; /* TSF -> TU */ 5613 if (sc->sc_tdmabintval & 1) 5614 sc->sc_tdmabintval++; 5615 5616 if (tdma->tdma_slot == 0) { 5617 /* 5618 * Only slot 0 beacons; other slots respond. 5619 */ 5620 sc->sc_imask |= HAL_INT_SWBA; 5621 sc->sc_tdmaswba = 0; /* beacon immediately */ 5622 } else { 5623 /* XXX all vaps must be slot 0 or slot !0 */ 5624 sc->sc_imask &= ~HAL_INT_SWBA; 5625 } 5626} 5627 5628/* 5629 * Max 802.11 overhead. This assumes no 4-address frames and 5630 * the encapsulation done by ieee80211_encap (llc). We also 5631 * include potential crypto overhead. 5632 */ 5633#define IEEE80211_MAXOVERHEAD \ 5634 (sizeof(struct ieee80211_qosframe) \ 5635 + sizeof(struct llc) \ 5636 + IEEE80211_ADDR_LEN \ 5637 + IEEE80211_WEP_IVLEN \ 5638 + IEEE80211_WEP_KIDLEN \ 5639 + IEEE80211_WEP_CRCLEN \ 5640 + IEEE80211_WEP_MICLEN \ 5641 + IEEE80211_CRC_LEN) 5642 5643/* 5644 * Setup initially for tdma operation. Start the beacon 5645 * timers and enable SWBA if we are slot 0. Otherwise 5646 * we wait for slot 0 to arrive so we can sync up before 5647 * starting to transmit. 
5648 */ 5649static void 5650ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap) 5651{ 5652 struct ath_hal *ah = sc->sc_ah; 5653 struct ifnet *ifp = sc->sc_ifp; 5654 struct ieee80211com *ic = ifp->if_l2com; 5655 const struct ieee80211_txparam *tp; 5656 const struct ieee80211_tdma_state *tdma = NULL; 5657 int rix; 5658 5659 if (vap == NULL) { 5660 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ 5661 if (vap == NULL) { 5662 if_printf(ifp, "%s: no vaps?\n", __func__); 5663 return; 5664 } 5665 } 5666 tp = vap->iv_bss->ni_txparms; 5667 /* 5668 * Calculate the guard time for each slot. This is the 5669 * time to send a maximal-size frame according to the 5670 * fixed/lowest transmit rate. Note that the interface 5671 * mtu does not include the 802.11 overhead so we must 5672 * tack that on (ath_hal_computetxtime includes the 5673 * preamble and plcp in it's calculation). 5674 */ 5675 tdma = vap->iv_tdma; 5676 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 5677 rix = ath_tx_findrix(sc, tp->ucastrate); 5678 else 5679 rix = ath_tx_findrix(sc, tp->mcastrate); 5680 /* XXX short preamble assumed */ 5681 sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates, 5682 ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE); 5683 5684 ath_hal_intrset(ah, 0); 5685 5686 ath_beaconq_config(sc); /* setup h/w beacon q */ 5687 if (sc->sc_setcca) 5688 ath_hal_setcca(ah, AH_FALSE); /* disable CCA */ 5689 ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */ 5690 ath_tdma_settimers(sc, sc->sc_tdmabintval, 5691 sc->sc_tdmabintval | HAL_BEACON_RESET_TSF); 5692 sc->sc_syncbeacon = 0; 5693 5694 sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER; 5695 sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER; 5696 5697 ath_hal_intrset(ah, sc->sc_imask); 5698 5699 DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u " 5700 "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__, 5701 tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt, 5702 tdma->tdma_bintval, sc->sc_tdmaguard, 
sc->sc_tdmabintval, 5703 sc->sc_tdmadbaprep); 5704} 5705 5706/* 5707 * Update tdma operation. Called from the 802.11 layer 5708 * when a beacon is received from the TDMA station operating 5709 * in the slot immediately preceding us in the bss. Use 5710 * the rx timestamp for the beacon frame to update our 5711 * beacon timers so we follow their schedule. Note that 5712 * by using the rx timestamp we implicitly include the 5713 * propagation delay in our schedule. 5714 */ 5715static void 5716ath_tdma_update(struct ieee80211_node *ni, 5717 const struct ieee80211_tdma_param *tdma, int changed) 5718{ 5719#define TSF_TO_TU(_h,_l) \ 5720 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) 5721#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) 5722 struct ieee80211vap *vap = ni->ni_vap; 5723 struct ieee80211com *ic = ni->ni_ic; 5724 struct ath_softc *sc = ic->ic_ifp->if_softc; 5725 struct ath_hal *ah = sc->sc_ah; 5726 const HAL_RATE_TABLE *rt = sc->sc_currates; 5727 u_int64_t tsf, rstamp, nextslot, nexttbtt; 5728 u_int32_t txtime, nextslottu; 5729 int32_t tudelta, tsfdelta; 5730 const struct ath_rx_status *rs; 5731 int rix; 5732 5733 sc->sc_stats.ast_tdma_update++; 5734 5735 /* 5736 * Check for and adopt configuration changes. 5737 */ 5738 if (changed != 0) { 5739 const struct ieee80211_tdma_state *ts = vap->iv_tdma; 5740 5741 ath_tdma_bintvalsetup(sc, ts); 5742 if (changed & TDMA_UPDATE_SLOTLEN) 5743 ath_wme_update(ic); 5744 5745 DPRINTF(sc, ATH_DEBUG_TDMA, 5746 "%s: adopt slot %u slotcnt %u slotlen %u us " 5747 "bintval %u TU\n", __func__, 5748 ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen, 5749 sc->sc_tdmabintval); 5750 5751 /* XXX right? 
*/ 5752 ath_hal_intrset(ah, sc->sc_imask); 5753 /* NB: beacon timers programmed below */ 5754 } 5755 5756 /* extend rx timestamp to 64 bits */ 5757 rs = sc->sc_lastrs; 5758 tsf = ath_hal_gettsf64(ah); 5759 rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); 5760 /* 5761 * The rx timestamp is set by the hardware on completing 5762 * reception (at the point where the rx descriptor is DMA'd 5763 * to the host). To find the start of our next slot we 5764 * must adjust this time by the time required to send 5765 * the packet just received. 5766 */ 5767 rix = rt->rateCodeToIndex[rs->rs_rate]; 5768 txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix, 5769 rt->info[rix].shortPreamble); 5770 /* NB: << 9 is to cvt to TU and /2 */ 5771 nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9); 5772 nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD; 5773 5774 /* 5775 * Retrieve the hardware NextTBTT in usecs 5776 * and calculate the difference between what the 5777 * other station thinks and what we have programmed. This 5778 * lets us figure how to adjust our timers to match. The 5779 * adjustments are done by pulling the TSF forward and possibly 5780 * rewriting the beacon timers. 
5781 */ 5782 nexttbtt = ath_hal_getnexttbtt(ah); 5783 tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt); 5784 5785 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 5786 "tsfdelta %d avg +%d/-%d\n", tsfdelta, 5787 TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam)); 5788 5789 if (tsfdelta < 0) { 5790 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 5791 TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta); 5792 tsfdelta = -tsfdelta % 1024; 5793 nextslottu++; 5794 } else if (tsfdelta > 0) { 5795 TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta); 5796 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 5797 tsfdelta = 1024 - (tsfdelta % 1024); 5798 nextslottu++; 5799 } else { 5800 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0); 5801 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0); 5802 } 5803 tudelta = nextslottu - TSF_TO_TU(nexttbtt >> 32, nexttbtt); 5804 5805 /* 5806 * Copy sender's timetstamp into tdma ie so they can 5807 * calculate roundtrip time. We submit a beacon frame 5808 * below after any timer adjustment. The frame goes out 5809 * at the next TBTT so the sender can calculate the 5810 * roundtrip by inspecting the tdma ie in our beacon frame. 5811 * 5812 * NB: This tstamp is subtlely preserved when 5813 * IEEE80211_BEACON_TDMA is marked (e.g. when the 5814 * slot position changes) because ieee80211_add_tdma 5815 * skips over the data. 5816 */ 5817 memcpy(ATH_VAP(vap)->av_boff.bo_tdma + 5818 __offsetof(struct ieee80211_tdma_param, tdma_tstamp), 5819 &ni->ni_tstamp.data, 8); 5820#if 0 5821 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER, 5822 "tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n", 5823 (unsigned long long) tsf, (unsigned long long) nextslot, 5824 (int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta); 5825#endif 5826 /* 5827 * Adjust the beacon timers only when pulling them forward 5828 * or when going back by less than the beacon interval. 5829 * Negative jumps larger than the beacon interval seem to 5830 * cause the timers to stop and generally cause instability. 
5831 * This basically filters out jumps due to missed beacons. 5832 */ 5833 if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) { 5834 ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval); 5835 sc->sc_stats.ast_tdma_timers++; 5836 } 5837 if (tsfdelta > 0) { 5838 ath_hal_adjusttsf(ah, tsfdelta); 5839 sc->sc_stats.ast_tdma_tsf++; 5840 } 5841 ath_tdma_beacon_send(sc, vap); /* prepare response */ 5842#undef TU_TO_TSF 5843#undef TSF_TO_TU 5844} 5845 5846/* 5847 * Transmit a beacon frame at SWBA. Dynamic updates 5848 * to the frame contents are done as needed. 5849 */ 5850static void 5851ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) 5852{ 5853 struct ath_hal *ah = sc->sc_ah; 5854 struct ath_buf *bf; 5855 int otherant; 5856 5857 /* 5858 * Check if the previous beacon has gone out. If 5859 * not don't try to post another, skip this period 5860 * and wait for the next. Missed beacons indicate 5861 * a problem and should not occur. If we miss too 5862 * many consecutive beacons reset the device. 5863 */ 5864 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { 5865 sc->sc_bmisscount++; 5866 DPRINTF(sc, ATH_DEBUG_BEACON, 5867 "%s: missed %u consecutive beacons\n", 5868 __func__, sc->sc_bmisscount); 5869 if (sc->sc_bmisscount >= ath_bstuck_threshold) 5870 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); 5871 return; 5872 } 5873 if (sc->sc_bmisscount != 0) { 5874 DPRINTF(sc, ATH_DEBUG_BEACON, 5875 "%s: resume beacon xmit after %u misses\n", 5876 __func__, sc->sc_bmisscount); 5877 sc->sc_bmisscount = 0; 5878 } 5879 5880 /* 5881 * Check recent per-antenna transmit statistics and flip 5882 * the default antenna if noticeably more frames went out 5883 * on the non-default antenna. 5884 * XXX assumes 2 anntenae 5885 */ 5886 if (!sc->sc_diversity) { 5887 otherant = sc->sc_defant & 1 ? 
2 : 1; 5888 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) 5889 ath_setdefantenna(sc, otherant); 5890 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; 5891 } 5892 5893 bf = ath_beacon_generate(sc, vap); 5894 if (bf != NULL) { 5895 /* 5896 * Stop any current dma and put the new frame on the queue. 5897 * This should never fail since we check above that no frames 5898 * are still pending on the queue. 5899 */ 5900 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { 5901 DPRINTF(sc, ATH_DEBUG_ANY, 5902 "%s: beacon queue %u did not stop?\n", 5903 __func__, sc->sc_bhalq); 5904 /* NB: the HAL still stops DMA, so proceed */ 5905 } 5906 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 5907 ath_hal_txstart(ah, sc->sc_bhalq); 5908 5909 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */ 5910 5911 /* 5912 * Record local TSF for our last send for use 5913 * in arbitrating slot collisions. 5914 */ 5915 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah); 5916 } 5917} 5918#endif /* IEEE80211_SUPPORT_TDMA */ 5919 5920static void 5921ath_dfs_tasklet(void *p, int npending) 5922{ 5923 struct ath_softc *sc = (struct ath_softc *) p; 5924 struct ifnet *ifp = sc->sc_ifp; 5925 struct ieee80211com *ic = ifp->if_l2com; 5926 5927 /* 5928 * If previous processing has found a radar event, 5929 * signal this to the net80211 layer to begin DFS 5930 * processing. 5931 */ 5932 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { 5933 /* DFS event found, initiate channel change */ 5934 ieee80211_dfs_notify_radar(ic, sc->sc_curchan); 5935 } 5936} 5937 5938MODULE_VERSION(if_ath, 1); 5939MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 5940