if_ath.c revision 237179
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 237179 2012-06-17 03:08:33Z adrian $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>	/* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#define	ATH_KTR_INTR	KTR_SPARE4
#define	ATH_KTR_ERR	KTR_SPARE3

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
134 */ 135CTASSERT(ATH_BCBUF <= 8); 136 137static struct ieee80211vap *ath_vap_create(struct ieee80211com *, 138 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 139 const uint8_t [IEEE80211_ADDR_LEN], 140 const uint8_t [IEEE80211_ADDR_LEN]); 141static void ath_vap_delete(struct ieee80211vap *); 142static void ath_init(void *); 143static void ath_stop_locked(struct ifnet *); 144static void ath_stop(struct ifnet *); 145static int ath_reset_vap(struct ieee80211vap *, u_long); 146static int ath_media_change(struct ifnet *); 147static void ath_watchdog(void *); 148static int ath_ioctl(struct ifnet *, u_long, caddr_t); 149static void ath_fatal_proc(void *, int); 150static void ath_bmiss_vap(struct ieee80211vap *); 151static void ath_bmiss_proc(void *, int); 152static void ath_key_update_begin(struct ieee80211vap *); 153static void ath_key_update_end(struct ieee80211vap *); 154static void ath_update_mcast(struct ifnet *); 155static void ath_update_promisc(struct ifnet *); 156static void ath_updateslot(struct ifnet *); 157static void ath_bstuck_proc(void *, int); 158static void ath_reset_proc(void *, int); 159static void ath_descdma_cleanup(struct ath_softc *sc, 160 struct ath_descdma *, ath_bufhead *); 161static int ath_desc_alloc(struct ath_softc *); 162static void ath_desc_free(struct ath_softc *); 163static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, 164 const uint8_t [IEEE80211_ADDR_LEN]); 165static void ath_node_cleanup(struct ieee80211_node *); 166static void ath_node_free(struct ieee80211_node *); 167static void ath_node_getsignal(const struct ieee80211_node *, 168 int8_t *, int8_t *); 169static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); 170static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); 171static int ath_tx_setup(struct ath_softc *, int, int); 172static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); 173static void ath_tx_cleanup(struct ath_softc *); 174static void ath_tx_proc_q0(void *, int); 175static void ath_tx_proc_q0123(void *, int); 176static void ath_tx_proc(void *, int); 177static void ath_txq_sched_tasklet(void *, int); 178static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); 179static void ath_draintxq(struct ath_softc *, ATH_RESET_TYPE reset_type); 180static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); 181static void ath_scan_start(struct ieee80211com *); 182static void ath_scan_end(struct ieee80211com *); 183static void ath_set_channel(struct ieee80211com *); 184#ifdef ATH_ENABLE_11N 185static void ath_update_chw(struct ieee80211com *); 186#endif /* ATH_ENABLE_11N */ 187static void ath_calibrate(void *); 188static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); 189static void ath_setup_stationkey(struct ieee80211_node *); 190static void ath_newassoc(struct ieee80211_node *, int); 191static int ath_setregdomain(struct ieee80211com *, 192 struct ieee80211_regdomain *, int, 193 struct ieee80211_channel []); 194static void ath_getradiocaps(struct ieee80211com *, int, int *, 195 struct ieee80211_channel []); 196static int ath_getchannels(struct ath_softc *); 197 198static int ath_rate_setup(struct ath_softc *, u_int mode); 199static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); 200 201static void ath_announce(struct ath_softc *); 202 203static void ath_dfs_tasklet(void *, int); 204 205#ifdef IEEE80211_SUPPORT_TDMA 206#include <dev/ath/if_ath_tdma.h> 207#endif 208 209#if 0 210#define TDMA_EP_MULTIPLIER (1<<10) 
#endif /* IEEE80211_SUPPORT_TDMA */

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
static	int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
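/*
 * For illustration (values are examples only): the tunables above can
 * be set from loader.conf, e.g.
 *
 *	hw.ath.rxbuf="80"
 *	hw.ath.txbuf="200"
 *
 * while the CTLFLAG_RW sysctls can also be adjusted at runtime, e.g.
 * "sysctl hw.ath.longcal=60".
 */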
int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
		sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask,0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask,0, ath_fatal_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
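	/*
	 * For illustration: on a fully-featured MAC this yields one beacon
	 * queue, one CAB queue and four WME data queues (BK/BE/VI/VO), so
	 * sc_txqsetup typically ends up with five bits set; with the CAB
	 * queue bit masked off, the 0x0f case in the switch below applies
	 * and ath_tx_proc_q0123 is used.  Hardware with a single data
	 * queue matches the 0x01 case instead.
	 */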
391 * 392 * XXX PS-Poll 393 */ 394 sc->sc_bhalq = ath_beaconq_setup(ah); 395 if (sc->sc_bhalq == (u_int) -1) { 396 if_printf(ifp, "unable to setup a beacon xmit queue!\n"); 397 error = EIO; 398 goto bad2; 399 } 400 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); 401 if (sc->sc_cabq == NULL) { 402 if_printf(ifp, "unable to setup CAB xmit queue!\n"); 403 error = EIO; 404 goto bad2; 405 } 406 /* NB: insure BK queue is the lowest priority h/w queue */ 407 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { 408 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", 409 ieee80211_wme_acnames[WME_AC_BK]); 410 error = EIO; 411 goto bad2; 412 } 413 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || 414 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || 415 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { 416 /* 417 * Not enough hardware tx queues to properly do WME; 418 * just punt and assign them all to the same h/w queue. 419 * We could do a better job of this if, for example, 420 * we allocate queues when we switch from station to 421 * AP mode. 422 */ 423 if (sc->sc_ac2q[WME_AC_VI] != NULL) 424 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); 425 if (sc->sc_ac2q[WME_AC_BE] != NULL) 426 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); 427 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; 428 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; 429 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; 430 } 431 432 /* 433 * Special case certain configurations. Note the 434 * CAB queue is handled by these specially so don't 435 * include them when checking the txq setup mask. 436 */ 437 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { 438 case 0x01: 439 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); 440 break; 441 case 0x0f: 442 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); 443 break; 444 default: 445 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); 446 break; 447 } 448 449 /* 450 * Setup rate control. Some rate control modules 451 * call back to change the anntena state so expose 452 * the necessary entry points. 453 * XXX maybe belongs in struct ath_ratectrl? 454 */ 455 sc->sc_setdefantenna = ath_setdefantenna; 456 sc->sc_rc = ath_rate_attach(sc); 457 if (sc->sc_rc == NULL) { 458 error = EIO; 459 goto bad2; 460 } 461 462 /* Attach DFS module */ 463 if (! ath_dfs_attach(sc)) { 464 device_printf(sc->sc_dev, 465 "%s: unable to attach DFS\n", __func__); 466 error = EIO; 467 goto bad2; 468 } 469 470 /* Start DFS processing tasklet */ 471 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); 472 473 /* Configure LED state */ 474 sc->sc_blinking = 0; 475 sc->sc_ledstate = 1; 476 sc->sc_ledon = 0; /* low true */ 477 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ 478 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); 479 480 /* 481 * Don't setup hardware-based blinking. 482 * 483 * Although some NICs may have this configured in the 484 * default reset register values, the user may wish 485 * to alter which pins have which function. 486 * 487 * The reference driver attaches the MAC network LED to GPIO1 and 488 * the MAC power LED to GPIO2. However, the DWA-552 cardbus 489 * NIC has these reversed. 490 */ 491 sc->sc_hardled = (1 == 0); 492 sc->sc_led_net_pin = -1; 493 sc->sc_led_pwr_pin = -1; 494 /* 495 * Auto-enable soft led processing for IBM cards and for 496 * 5211 minipci cards. Users can also manually enable/disable 497 * support with a sysctl. 
498 */ 499 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); 500 ath_led_config(sc); 501 ath_hal_setledstate(ah, HAL_LED_INIT); 502 503 ifp->if_softc = sc; 504 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 505 ifp->if_start = ath_start; 506 ifp->if_ioctl = ath_ioctl; 507 ifp->if_init = ath_init; 508 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 509 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 510 IFQ_SET_READY(&ifp->if_snd); 511 512 ic->ic_ifp = ifp; 513 /* XXX not right but it's not used anywhere important */ 514 ic->ic_phytype = IEEE80211_T_OFDM; 515 ic->ic_opmode = IEEE80211_M_STA; 516 ic->ic_caps = 517 IEEE80211_C_STA /* station mode */ 518 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ 519 | IEEE80211_C_HOSTAP /* hostap mode */ 520 | IEEE80211_C_MONITOR /* monitor mode */ 521 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 522 | IEEE80211_C_WDS /* 4-address traffic works */ 523 | IEEE80211_C_MBSS /* mesh point link mode */ 524 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 525 | IEEE80211_C_SHSLOT /* short slot time supported */ 526 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ 527#ifndef ATH_ENABLE_11N 528 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 529#endif 530 | IEEE80211_C_TXFRAG /* handle tx frags */ 531#ifdef ATH_ENABLE_DFS 532 | IEEE80211_C_DFS /* Enable radar detection */ 533#endif 534 ; 535 /* 536 * Query the hal to figure out h/w crypto support. 537 */ 538 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) 539 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; 540 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) 541 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; 542 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) 543 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; 544 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) 545 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; 546 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { 547 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; 548 /* 549 * Check if h/w does the MIC and/or whether the 550 * separate key cache entries are required to 551 * handle both tx+rx MIC keys. 552 */ 553 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) 554 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 555 /* 556 * If the h/w supports storing tx+rx MIC keys 557 * in one cache slot automatically enable use. 558 */ 559 if (ath_hal_hastkipsplit(ah) || 560 !ath_hal_settkipsplit(ah, AH_FALSE)) 561 sc->sc_splitmic = 1; 562 /* 563 * If the h/w can do TKIP MIC together with WME then 564 * we use it; otherwise we force the MIC to be done 565 * in software by the net80211 layer. 566 */ 567 if (ath_hal_haswmetkipmic(ah)) 568 sc->sc_wmetkipmic = 1; 569 } 570 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); 571 /* 572 * Check for multicast key search support. 573 */ 574 if (ath_hal_hasmcastkeysearch(sc->sc_ah) && 575 !ath_hal_getmcastkeysearch(sc->sc_ah)) { 576 ath_hal_setmcastkeysearch(sc->sc_ah, 1); 577 } 578 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); 579 /* 580 * Mark key cache slots associated with global keys 581 * as in use. If we knew TKIP was not to be used we 582 * could leave the +32, +64, and +32+64 slots free. 583 */ 584 for (i = 0; i < IEEE80211_WEP_NKID; i++) { 585 setbit(sc->sc_keymap, i); 586 setbit(sc->sc_keymap, i+64); 587 if (sc->sc_splitmic) { 588 setbit(sc->sc_keymap, i+32); 589 setbit(sc->sc_keymap, i+32+64); 590 } 591 } 592 /* 593 * TPC support can be done either with a global cap or 594 * per-packet support. The latter is not available on 595 * all parts. 
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}
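	/*
	 * For illustration (values are examples only), the override can
	 * be supplied via /boot/device.hints or loader.conf:
	 *
	 *	hint.ath.0.rx_chainmask="0x3"
	 *	hint.ath.0.tx_chainmask="0x3"
	 *
	 * which would restrict a 3-chain NIC to its first two chains.
	 */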
686 */ 687 if ((ath_hal_getcapability(ah, 688 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && 689 (wmodes & HAL_MODE_HT20)) { 690 device_printf(sc->sc_dev, 691 "[HT] enabling short-GI in 20MHz mode\n"); 692 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; 693 } 694 695 if (wmodes & HAL_MODE_HT40) 696 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 697 | IEEE80211_HTCAP_SHORTGI40; 698 699 /* 700 * TX/RX streams need to be taken into account when 701 * negotiating which MCS rates it'll receive and 702 * what MCS rates are available for TX. 703 */ 704 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); 705 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); 706 707 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); 708 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); 709 710 ic->ic_txstream = txs; 711 ic->ic_rxstream = rxs; 712 713 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, 714 &sc->sc_rts_aggr_limit); 715 if (sc->sc_rts_aggr_limit != (64 * 1024)) 716 device_printf(sc->sc_dev, 717 "[HT] RTS aggregates limited to %d KiB\n", 718 sc->sc_rts_aggr_limit / 1024); 719 720 device_printf(sc->sc_dev, 721 "[HT] %d RX streams; %d TX streams\n", rxs, txs); 722 } 723#endif 724 725 /* 726 * Check if the hardware requires PCI register serialisation. 727 * Some of the Owl based MACs require this. 728 */ 729 if (mp_ncpus > 1 && 730 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 731 0, NULL) == HAL_OK) { 732 sc->sc_ah->ah_config.ah_serialise_reg_war = 1; 733 device_printf(sc->sc_dev, 734 "Enabling register serialisation\n"); 735 } 736 737 /* 738 * Indicate we need the 802.11 header padded to a 739 * 32-bit boundary for 4-address and QoS frames. 740 */ 741 ic->ic_flags |= IEEE80211_F_DATAPAD; 742 743 /* 744 * Query the hal about antenna support. 745 */ 746 sc->sc_defant = ath_hal_getdefantenna(ah); 747 748 /* 749 * Not all chips have the VEOL support we want to 750 * use with IBSS beacons; check here for it. 751 */ 752 sc->sc_hasveol = ath_hal_hasveol(ah); 753 754 /* get mac address from hardware */ 755 ath_hal_getmac(ah, macaddr); 756 if (sc->sc_hasbmask) 757 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); 758 759 /* NB: used to size node table key mapping array */ 760 ic->ic_max_keyix = sc->sc_keymax; 761 /* call MI attach routine. 
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);

	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
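/*
 * For illustration, assuming an EEPROM address of 00:11:22:33:44:55:
 * the first vap keeps 00:11:22:33:44:55 (index 0), while the next
 * clone requesting a unique bssid takes index 1 and ORs in
 * (1 << 2) | 0x2, yielding 06:11:22:33:44:55 with the locally
 * administered (U/L) bit set.  sc_hwbssidmask is narrowed accordingly
 * so the hardware matches on the address bits all active vaps share.
 */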
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs doesn't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
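			/*
			 * For illustration, with ATH_BCBUF == 4 and only
			 * slot 0 occupied, assign_bslot() skips slot 1
			 * (its neighbour 0 is busy) and settles on slot 2,
			 * keeping staggered beacons maximally spaced.
			 */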
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	free(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq.  Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */
1224 * 1225 * TODO: figure out why a new node gets allocated somewhere around 1226 * here (after the ath_tx_swq() call; and after an ath_stop_locked() 1227 * call!) 1228 */ 1229 1230 ath_draintxq(sc, ATH_RESET_DEFAULT); 1231 1232 ATH_LOCK(sc); 1233 /* 1234 * Reclaim beacon state. Note this must be done before 1235 * the vap instance is reclaimed as we may have a reference 1236 * to it in the buffer for the beacon frame. 1237 */ 1238 if (avp->av_bcbuf != NULL) { 1239 if (avp->av_bslot != -1) { 1240 sc->sc_bslot[avp->av_bslot] = NULL; 1241 sc->sc_nbcnvaps--; 1242 } 1243 ath_beacon_return(sc, avp->av_bcbuf); 1244 avp->av_bcbuf = NULL; 1245 if (sc->sc_nbcnvaps == 0) { 1246 sc->sc_stagbeacons = 0; 1247 if (sc->sc_hastsfadd) 1248 ath_hal_settsfadjust(sc->sc_ah, 0); 1249 } 1250 /* 1251 * Reclaim any pending mcast frames for the vap. 1252 */ 1253 ath_tx_draintxq(sc, &avp->av_mcastq); 1254 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); 1255 } 1256 /* 1257 * Update bookkeeping. 1258 */ 1259 if (vap->iv_opmode == IEEE80211_M_STA) { 1260 sc->sc_nstavaps--; 1261 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) 1262 sc->sc_swbmiss = 0; 1263 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || 1264 vap->iv_opmode == IEEE80211_M_MBSS) { 1265 reclaim_address(sc, vap->iv_myaddr); 1266 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); 1267 if (vap->iv_opmode == IEEE80211_M_MBSS) 1268 sc->sc_nmeshvaps--; 1269 } 1270 if (vap->iv_opmode != IEEE80211_M_WDS) 1271 sc->sc_nvaps--; 1272#ifdef IEEE80211_SUPPORT_TDMA 1273 /* TDMA operation ceases when the last vap is destroyed */ 1274 if (sc->sc_tdma && sc->sc_nvaps == 0) { 1275 sc->sc_tdma = 0; 1276 sc->sc_swbmiss = 0; 1277 } 1278#endif 1279 free(avp, M_80211_VAP); 1280 1281 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1282 /* 1283 * Restart rx+tx machines if still running (RUNNING will 1284 * be reset if we just destroyed the last vap). 1285 */ 1286 if (ath_startrecv(sc) != 0) 1287 if_printf(ifp, "%s: unable to restart recv logic\n", 1288 __func__); 1289 if (sc->sc_beacons) { /* restart beacons */ 1290#ifdef IEEE80211_SUPPORT_TDMA 1291 if (sc->sc_tdma) 1292 ath_tdma_config(sc, NULL); 1293 else 1294#endif 1295 ath_beacon_config(sc, NULL); 1296 } 1297 ath_hal_intrset(ah, sc->sc_imask); 1298 } 1299 ATH_UNLOCK(sc); 1300} 1301 1302void 1303ath_suspend(struct ath_softc *sc) 1304{ 1305 struct ifnet *ifp = sc->sc_ifp; 1306 struct ieee80211com *ic = ifp->if_l2com; 1307 1308 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1309 __func__, ifp->if_flags); 1310 1311 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; 1312 1313 ieee80211_suspend_all(ic); 1314 /* 1315 * NB: don't worry about putting the chip in low power 1316 * mode; pci will power off our socket on suspend and 1317 * CardBus detaches the device. 1318 */ 1319 1320 /* 1321 * XXX ensure none of the taskqueues are running 1322 * XXX ensure sc_invalid is 1 1323 * XXX ensure the calibration callout is disabled 1324 */ 1325 1326 /* Disable the PCIe PHY, complete with workarounds */ 1327 ath_hal_enablepcie(sc->sc_ah, 1, 1); 1328} 1329 1330/* 1331 * Reset the key cache since some parts do not reset the 1332 * contents on resume. First we clear all entries, then 1333 * re-load keys that the 802.11 layer assumes are setup 1334 * in h/w. 
1335 */ 1336static void 1337ath_reset_keycache(struct ath_softc *sc) 1338{ 1339 struct ifnet *ifp = sc->sc_ifp; 1340 struct ieee80211com *ic = ifp->if_l2com; 1341 struct ath_hal *ah = sc->sc_ah; 1342 int i; 1343 1344 for (i = 0; i < sc->sc_keymax; i++) 1345 ath_hal_keyreset(ah, i); 1346 ieee80211_crypto_reload_keys(ic); 1347} 1348 1349void 1350ath_resume(struct ath_softc *sc) 1351{ 1352 struct ifnet *ifp = sc->sc_ifp; 1353 struct ieee80211com *ic = ifp->if_l2com; 1354 struct ath_hal *ah = sc->sc_ah; 1355 HAL_STATUS status; 1356 1357 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1358 __func__, ifp->if_flags); 1359 1360 /* Re-enable PCIe, re-enable the PCIe bus */ 1361 ath_hal_enablepcie(ah, 0, 0); 1362 1363 /* 1364 * Must reset the chip before we reload the 1365 * keycache as we were powered down on suspend. 1366 */ 1367 ath_hal_reset(ah, sc->sc_opmode, 1368 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, 1369 AH_FALSE, &status); 1370 ath_reset_keycache(sc); 1371 1372 /* Let DFS at it in case it's a DFS channel */ 1373 ath_dfs_radar_enable(sc, ic->ic_curchan); 1374 1375 /* Restore the LED configuration */ 1376 ath_led_config(sc); 1377 ath_hal_setledstate(ah, HAL_LED_INIT); 1378 1379 if (sc->sc_resume_up) 1380 ieee80211_resume_all(ic); 1381 1382 /* XXX beacons ? */ 1383} 1384 1385void 1386ath_shutdown(struct ath_softc *sc) 1387{ 1388 struct ifnet *ifp = sc->sc_ifp; 1389 1390 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", 1391 __func__, ifp->if_flags); 1392 1393 ath_stop(ifp); 1394 /* NB: no point powering down chip as we're about to reboot */ 1395} 1396 1397/* 1398 * Interrupt handler. Most of the actual processing is deferred. 1399 */ 1400void 1401ath_intr(void *arg) 1402{ 1403 struct ath_softc *sc = arg; 1404 struct ifnet *ifp = sc->sc_ifp; 1405 struct ath_hal *ah = sc->sc_ah; 1406 HAL_INT status = 0; 1407 uint32_t txqs; 1408 1409 /* 1410 * If we're inside a reset path, just print a warning and 1411 * clear the ISR. The reset routine will finish it for us. 1412 */ 1413 ATH_PCU_LOCK(sc); 1414 if (sc->sc_inreset_cnt) { 1415 HAL_INT status; 1416 ath_hal_getisr(ah, &status); /* clear ISR */ 1417 ath_hal_intrset(ah, 0); /* disable further intr's */ 1418 DPRINTF(sc, ATH_DEBUG_ANY, 1419 "%s: in reset, ignoring: status=0x%x\n", 1420 __func__, status); 1421 ATH_PCU_UNLOCK(sc); 1422 return; 1423 } 1424 1425 if (sc->sc_invalid) { 1426 /* 1427 * The hardware is not ready/present, don't touch anything. 1428 * Note this can happen early on if the IRQ is shared. 1429 */ 1430 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); 1431 ATH_PCU_UNLOCK(sc); 1432 return; 1433 } 1434 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ 1435 ATH_PCU_UNLOCK(sc); 1436 return; 1437 } 1438 1439 if ((ifp->if_flags & IFF_UP) == 0 || 1440 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1441 HAL_INT status; 1442 1443 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1444 __func__, ifp->if_flags); 1445 ath_hal_getisr(ah, &status); /* clear ISR */ 1446 ath_hal_intrset(ah, 0); /* disable further intr's */ 1447 ATH_PCU_UNLOCK(sc); 1448 return; 1449 } 1450 1451 /* 1452 * Figure out the reason(s) for the interrupt. Note 1453 * that the hal returns a pseudo-ISR that may include 1454 * bits we haven't explicitly enabled so we mask the 1455 * value to insure we only process bits we requested. 
1456 */ 1457 ath_hal_getisr(ah, &status); /* NB: clears ISR too */ 1458 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); 1459 CTR1(ATH_KTR_INTR, "ath_intr: mask=0x%.8x", status); 1460#ifdef ATH_KTR_INTR_DEBUG 1461 CTR5(ATH_KTR_INTR, 1462 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", 1463 ah->ah_intrstate[0], 1464 ah->ah_intrstate[1], 1465 ah->ah_intrstate[2], 1466 ah->ah_intrstate[3], 1467 ah->ah_intrstate[6]); 1468#endif 1469 1470 /* Squirrel away SYNC interrupt debugging */ 1471 if (ah->ah_syncstate != 0) { 1472 int i; 1473 for (i = 0; i < 32; i++) 1474 if (ah->ah_syncstate & (i << i)) 1475 sc->sc_intr_stats.sync_intr[i]++; 1476 } 1477 1478 status &= sc->sc_imask; /* discard unasked for bits */ 1479 1480 /* Short-circuit un-handled interrupts */ 1481 if (status == 0x0) { 1482 ATH_PCU_UNLOCK(sc); 1483 return; 1484 } 1485 1486 /* 1487 * Take a note that we're inside the interrupt handler, so 1488 * the reset routines know to wait. 1489 */ 1490 sc->sc_intr_cnt++; 1491 ATH_PCU_UNLOCK(sc); 1492 1493 /* 1494 * Handle the interrupt. We won't run concurrent with the reset 1495 * or channel change routines as they'll wait for sc_intr_cnt 1496 * to be 0 before continuing. 1497 */ 1498 if (status & HAL_INT_FATAL) { 1499 sc->sc_stats.ast_hardware++; 1500 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 1501 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 1502 } else { 1503 if (status & HAL_INT_SWBA) { 1504 /* 1505 * Software beacon alert--time to send a beacon. 1506 * Handle beacon transmission directly; deferring 1507 * this is too slow to meet timing constraints 1508 * under load. 1509 */ 1510#ifdef IEEE80211_SUPPORT_TDMA 1511 if (sc->sc_tdma) { 1512 if (sc->sc_tdmaswba == 0) { 1513 struct ieee80211com *ic = ifp->if_l2com; 1514 struct ieee80211vap *vap = 1515 TAILQ_FIRST(&ic->ic_vaps); 1516 ath_tdma_beacon_send(sc, vap); 1517 sc->sc_tdmaswba = 1518 vap->iv_tdma->tdma_bintval; 1519 } else 1520 sc->sc_tdmaswba--; 1521 } else 1522#endif 1523 { 1524 ath_beacon_proc(sc, 0); 1525#ifdef IEEE80211_SUPPORT_SUPERG 1526 /* 1527 * Schedule the rx taskq in case there's no 1528 * traffic so any frames held on the staging 1529 * queue are aged and potentially flushed. 1530 */ 1531 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); 1532#endif 1533 } 1534 } 1535 if (status & HAL_INT_RXEOL) { 1536 int imask; 1537 CTR0(ATH_KTR_ERR, "ath_intr: RXEOL"); 1538 ATH_PCU_LOCK(sc); 1539 /* 1540 * NB: the hardware should re-read the link when 1541 * RXE bit is written, but it doesn't work at 1542 * least on older hardware revs. 1543 */ 1544 sc->sc_stats.ast_rxeol++; 1545 /* 1546 * Disable RXEOL/RXORN - prevent an interrupt 1547 * storm until the PCU logic can be reset. 1548 * In case the interface is reset some other 1549 * way before "sc_kickpcu" is called, don't 1550 * modify sc_imask - that way if it is reset 1551 * by a call to ath_reset() somehow, the 1552 * interrupt mask will be correctly reprogrammed. 1553 */ 1554 imask = sc->sc_imask; 1555 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 1556 ath_hal_intrset(ah, imask); 1557 /* 1558 * Only blank sc_rxlink if we've not yet kicked 1559 * the PCU. 1560 * 1561 * This isn't entirely correct - the correct solution 1562 * would be to have a PCU lock and engage that for 1563 * the duration of the PCU fiddling; which would include 1564 * running the RX process. Otherwise we could end up 1565 * messing up the RX descriptor chain and making the 1566 * RX desc list much shorter. 1567 */ 1568 if (! 
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			sc->sc_stats.ast_rx_intr++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them. This is the only place we should be
			 * doing this.
			 */
			ATH_PCU_LOCK(sc);
			txqs = 0xffffffff;
			ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
			sc->sc_txq_active |= txqs;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			CTR0(ATH_KTR_ERR, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
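	/*
	 * For illustration: beacon intervals are in TU (1024us), so with
	 * a typical ni_intval of 100 TU and an iv_bmissthreshold of 7 the
	 * computed timeout below is 7 * 100 * 1024 = 716800us; the bmiss
	 * interrupt is ignored unless nothing was received for ~0.7s.
	 */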
1679 */ 1680 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { 1681 struct ifnet *ifp = vap->iv_ic->ic_ifp; 1682 struct ath_softc *sc = ifp->if_softc; 1683 u_int64_t lastrx = sc->sc_lastrx; 1684 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); 1685 /* XXX should take a locked ref to iv_bss */ 1686 u_int bmisstimeout = 1687 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; 1688 1689 DPRINTF(sc, ATH_DEBUG_BEACON, 1690 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", 1691 __func__, (unsigned long long) tsf, 1692 (unsigned long long)(tsf - lastrx), 1693 (unsigned long long) lastrx, bmisstimeout); 1694 1695 if (tsf - lastrx <= bmisstimeout) { 1696 sc->sc_stats.ast_bmiss_phantom++; 1697 return; 1698 } 1699 } 1700 ATH_VAP(vap)->av_bmiss(vap); 1701} 1702 1703static int 1704ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) 1705{ 1706 uint32_t rsize; 1707 void *sp; 1708 1709 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) 1710 return 0; 1711 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); 1712 *hangs = *(uint32_t *)sp; 1713 return 1; 1714} 1715 1716static void 1717ath_bmiss_proc(void *arg, int pending) 1718{ 1719 struct ath_softc *sc = arg; 1720 struct ifnet *ifp = sc->sc_ifp; 1721 uint32_t hangs; 1722 1723 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); 1724 1725 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { 1726 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); 1727 ath_reset(ifp, ATH_RESET_NOLOSS); 1728 } else 1729 ieee80211_beacon_miss(ifp->if_l2com); 1730} 1731 1732/* 1733 * Handle TKIP MIC setup to deal hardware that doesn't do MIC 1734 * calcs together with WME. If necessary disable the crypto 1735 * hardware and mark the 802.11 state so keys will be setup 1736 * with the MIC work done in software. 1737 */ 1738static void 1739ath_settkipmic(struct ath_softc *sc) 1740{ 1741 struct ifnet *ifp = sc->sc_ifp; 1742 struct ieee80211com *ic = ifp->if_l2com; 1743 1744 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { 1745 if (ic->ic_flags & IEEE80211_F_WME) { 1746 ath_hal_settkipmic(sc->sc_ah, AH_FALSE); 1747 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; 1748 } else { 1749 ath_hal_settkipmic(sc->sc_ah, AH_TRUE); 1750 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; 1751 } 1752 } 1753} 1754 1755static void 1756ath_init(void *arg) 1757{ 1758 struct ath_softc *sc = (struct ath_softc *) arg; 1759 struct ifnet *ifp = sc->sc_ifp; 1760 struct ieee80211com *ic = ifp->if_l2com; 1761 struct ath_hal *ah = sc->sc_ah; 1762 HAL_STATUS status; 1763 1764 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", 1765 __func__, ifp->if_flags); 1766 1767 ATH_LOCK(sc); 1768 /* 1769 * Stop anything previously setup. This is safe 1770 * whether this is the first time through or not. 1771 */ 1772 ath_stop_locked(ifp); 1773 1774 /* 1775 * The basic interface to setting the hardware in a good 1776 * state is ``reset''. On return the hardware is known to 1777 * be powered up and with interrupts disabled. This must 1778 * be followed by initialization of the appropriate bits 1779 * and then setup of the interrupt mask. 
1780 */ 1781 ath_settkipmic(sc); 1782 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { 1783 if_printf(ifp, "unable to reset hardware; hal status %u\n", 1784 status); 1785 ATH_UNLOCK(sc); 1786 return; 1787 } 1788 ath_chan_change(sc, ic->ic_curchan); 1789 1790 /* Let DFS at it in case it's a DFS channel */ 1791 ath_dfs_radar_enable(sc, ic->ic_curchan); 1792 1793 /* 1794 * Likewise this is set during reset so update 1795 * state cached in the driver. 1796 */ 1797 sc->sc_diversity = ath_hal_getdiversity(ah); 1798 sc->sc_lastlongcal = 0; 1799 sc->sc_resetcal = 1; 1800 sc->sc_lastcalreset = 0; 1801 sc->sc_lastani = 0; 1802 sc->sc_lastshortcal = 0; 1803 sc->sc_doresetcal = AH_FALSE; 1804 /* 1805 * Beacon timers were cleared here; give ath_newstate() 1806 * a hint that the beacon timers should be poked when 1807 * things transition to the RUN state. 1808 */ 1809 sc->sc_beacons = 0; 1810 1811 /* 1812 * Initial aggregation settings. 1813 */ 1814 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH; 1815 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; 1816 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; 1817 1818 /* 1819 * Setup the hardware after reset: the key cache 1820 * is filled as needed and the receive engine is 1821 * set going. Frame transmit is handled entirely 1822 * in the frame output path; there's nothing to do 1823 * here except setup the interrupt mask. 1824 */ 1825 if (ath_startrecv(sc) != 0) { 1826 if_printf(ifp, "unable to start recv logic\n"); 1827 ATH_UNLOCK(sc); 1828 return; 1829 } 1830 1831 /* 1832 * Enable interrupts. 1833 */ 1834 sc->sc_imask = HAL_INT_RX | HAL_INT_TX 1835 | HAL_INT_RXEOL | HAL_INT_RXORN 1836 | HAL_INT_FATAL | HAL_INT_GLOBAL; 1837 /* 1838 * Enable MIB interrupts when there are hardware phy counters. 1839 * Note we only do this (at the moment) for station mode. 1840 */ 1841 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 1842 sc->sc_imask |= HAL_INT_MIB; 1843 1844 /* Enable global TX timeout and carrier sense timeout if available */ 1845 if (ath_hal_gtxto_supported(ah)) 1846 sc->sc_imask |= HAL_INT_GTT; 1847 1848 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 1849 __func__, sc->sc_imask); 1850 1851 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1852 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 1853 ath_hal_intrset(ah, sc->sc_imask); 1854 1855 ATH_UNLOCK(sc); 1856 1857#ifdef ATH_TX99_DIAG 1858 if (sc->sc_tx99 != NULL) 1859 sc->sc_tx99->start(sc->sc_tx99); 1860 else 1861#endif 1862 ieee80211_start_all(ic); /* start all vap's */ 1863} 1864 1865static void 1866ath_stop_locked(struct ifnet *ifp) 1867{ 1868 struct ath_softc *sc = ifp->if_softc; 1869 struct ath_hal *ah = sc->sc_ah; 1870 1871 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", 1872 __func__, sc->sc_invalid, ifp->if_flags); 1873 1874 ATH_LOCK_ASSERT(sc); 1875 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1876 /* 1877 * Shutdown the hardware and driver: 1878 * reset 802.11 state machine 1879 * turn off timers 1880 * disable interrupts 1881 * turn off the radio 1882 * clear transmit machinery 1883 * clear receive machinery 1884 * drain and release tx queues 1885 * reclaim beacon resources 1886 * power down hardware 1887 * 1888 * Note that some of this work is not possible if the 1889 * hardware is gone (invalid). 
1890 */
1891#ifdef ATH_TX99_DIAG
1892 if (sc->sc_tx99 != NULL)
1893 sc->sc_tx99->stop(sc->sc_tx99);
1894#endif
1895 callout_stop(&sc->sc_wd_ch);
1896 sc->sc_wd_timer = 0;
1897 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1898 if (!sc->sc_invalid) {
1899 if (sc->sc_softled) {
1900 callout_stop(&sc->sc_ledtimer);
1901 ath_hal_gpioset(ah, sc->sc_ledpin,
1902 !sc->sc_ledon);
1903 sc->sc_blinking = 0;
1904 }
1905 ath_hal_intrset(ah, 0);
1906 }
1907 ath_draintxq(sc, ATH_RESET_DEFAULT);
1908 if (!sc->sc_invalid) {
1909 ath_stoprecv(sc, 1);
1910 ath_hal_phydisable(ah);
1911 } else
1912 sc->sc_rxlink = NULL;
1913 ath_beacon_free(sc); /* XXX not needed */
1914 }
1915}
1916
1917#define MAX_TXRX_ITERATIONS 1000
1918static void
1919ath_txrx_stop_locked(struct ath_softc *sc)
1920{
1921 int i = MAX_TXRX_ITERATIONS;
1922
1923 ATH_UNLOCK_ASSERT(sc);
1924 ATH_PCU_LOCK_ASSERT(sc);
1925
1926 /*
1927 * Sleep until all the pending operations have completed.
1928 *
1929 * The caller must ensure that reset has been incremented
1930 * or the pending operations may continue being queued.
1931 */
1932 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
1933 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
1934 if (i <= 0)
1935 break;
1936 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
1937 i--;
1938 }
1939
1940 if (i <= 0)
1941 device_printf(sc->sc_dev,
1942 "%s: didn't finish after %d iterations\n",
1943 __func__, MAX_TXRX_ITERATIONS);
1944}
1945#undef MAX_TXRX_ITERATIONS
1946
1947#if 0
1948static void
1949ath_txrx_stop(struct ath_softc *sc)
1950{
1951 ATH_UNLOCK_ASSERT(sc);
1952 ATH_PCU_UNLOCK_ASSERT(sc);
1953
1954 ATH_PCU_LOCK(sc);
1955 ath_txrx_stop_locked(sc);
1956 ATH_PCU_UNLOCK(sc);
1957}
1958#endif
1959
1960static void
1961ath_txrx_start(struct ath_softc *sc)
1962{
1963
1964 taskqueue_unblock(sc->sc_tq);
1965}
1966
1967/*
1968 * Grab the reset lock, and wait around until no one else
1969 * is trying to do anything with it.
1970 *
1971 * This is totally horrible but we can't hold this lock for
1972 * long enough to do TX/RX or we end up with net80211/ip stack
1973 * LORs and eventual deadlock.
1974 *
1975 * "dowait" signals whether to spin, waiting for the reset
1976 * lock count to reach 0. This should (for now) only be used
1977 * during the reset path, as the rest of the code may not
1978 * be locking-reentrant enough to behave correctly.
1979 *
1980 * Another, cleaner way should be found to serialise all of
1981 * these operations.
1982 */
1983#define MAX_RESET_ITERATIONS 10
1984static int
1985ath_reset_grablock(struct ath_softc *sc, int dowait)
1986{
1987 int w = 0;
1988 int i = MAX_RESET_ITERATIONS;
1989
1990 ATH_PCU_LOCK_ASSERT(sc);
1991 do {
1992 if (sc->sc_inreset_cnt == 0) {
1993 w = 1;
1994 break;
1995 }
1996 if (dowait == 0) {
1997 w = 0;
1998 break;
1999 }
2000 ATH_PCU_UNLOCK(sc);
2001 pause("ath_reset_grablock", 1);
2002 i--;
2003 ATH_PCU_LOCK(sc);
2004 } while (i > 0);
2005
2006 /*
2007 * We always increment the refcounter, regardless
2008 * of whether we managed to get it in an exclusive
2009 * way.
2010 */
2011 sc->sc_inreset_cnt++;
2012
2013 if (i <= 0)
2014 device_printf(sc->sc_dev,
2015 "%s: didn't finish after %d iterations\n",
2016 __func__, MAX_RESET_ITERATIONS);
2017
2018 if (w == 0)
2019 device_printf(sc->sc_dev,
2020 "%s: warning, recursive reset path!\n",
2021 __func__);
2022
2023 return w;
2024}
2025#undef MAX_RESET_ITERATIONS
2026
2027/*
2028 * XXX TODO: write ath_reset_releaselock
2029 */
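/*
 * Editor's sketch of what that routine might look like (an assumption,
 * not committed code); it would simply mirror the release sequence
 * currently open-coded at the end of ath_reset() below:
 *
 *	static void
 *	ath_reset_releaselock(struct ath_softc *sc)
 *	{
 *
 *		ATH_PCU_LOCK(sc);
 *		sc->sc_inreset_cnt--;
 *		ath_hal_intrset(sc->sc_ah, sc->sc_imask);
 *		ATH_PCU_UNLOCK(sc);
 *	}
 */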
2030
2031static void
2032ath_stop(struct ifnet *ifp)
2033{
2034 struct ath_softc *sc = ifp->if_softc;
2035
2036 ATH_LOCK(sc);
2037 ath_stop_locked(ifp);
2038 ATH_UNLOCK(sc);
2039}
2040
2041/*
2042 * Reset the hardware w/o losing operational state. This is
2043 * basically a more efficient way of doing ath_stop, ath_init,
2044 * followed by state transitions to the current 802.11
2045 * operational state. Used to recover from various errors and
2046 * to reset or reload hardware state.
2047 */
2048int
2049ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2050{
2051 struct ath_softc *sc = ifp->if_softc;
2052 struct ieee80211com *ic = ifp->if_l2com;
2053 struct ath_hal *ah = sc->sc_ah;
2054 HAL_STATUS status;
2055 int i;
2056
2057 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2058
2059 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2060 ATH_PCU_UNLOCK_ASSERT(sc);
2061 ATH_UNLOCK_ASSERT(sc);
2062
2063 /* (Try to) stop any further TX/RX from occurring */
2064 taskqueue_block(sc->sc_tq);
2065
2066 ATH_PCU_LOCK(sc);
2067 ath_hal_intrset(ah, 0); /* disable interrupts */
2068 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2069 if (ath_reset_grablock(sc, 1) == 0) {
2070 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2071 __func__);
2072 }
2073 ATH_PCU_UNLOCK(sc);
2074
2075 /*
2076 * Should now wait for pending TX/RX to complete
2077 * and block future ones from occurring. This needs to be
2078 * done before the TX queue is drained.
2079 */
2080 ath_draintxq(sc, reset_type); /* stop xmit side */
2081
2082 /*
2083 * Regardless of whether we're doing a no-loss flush or
2084 * not, stop the PCU and handle what's in the RX queue.
2085 * That way frames aren't dropped which shouldn't be.
2086 */
2087 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2088 ath_rx_proc(sc, 0);
2089
2090 ath_settkipmic(sc); /* configure TKIP MIC handling */
2091 /* NB: indicate channel change so we do a full reset */
2092 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2093 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2094 __func__, status);
2095 sc->sc_diversity = ath_hal_getdiversity(ah);
2096
2097 /* Let DFS at it in case it's a DFS channel */
2098 ath_dfs_radar_enable(sc, ic->ic_curchan);
2099
2100 if (ath_startrecv(sc) != 0) /* restart recv */
2101 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2102 /*
2103 * We may be doing a reset in response to an ioctl
2104 * that changes the channel so update any state that
2105 * might change as a result.
2106 */
2107 ath_chan_change(sc, ic->ic_curchan);
2108 if (sc->sc_beacons) { /* restart beacons */
2109#ifdef IEEE80211_SUPPORT_TDMA
2110 if (sc->sc_tdma)
2111 ath_tdma_config(sc, NULL);
2112 else
2113#endif
2114 ath_beacon_config(sc, NULL);
2115 }
2116
2117 /*
2118 * Release the reset lock and re-enable interrupts here.
2119 * If an interrupt was being processed in ath_intr(),
2120 * it would disable interrupts at this point. So we have
2121 * to atomically enable interrupts and decrement the
2122 * reset counter - this way ath_intr() doesn't end up
2123 * disabling interrupts without a corresponding enable
2124 * in the reset or channel change path.
2125 */
2126 ATH_PCU_LOCK(sc);
2127 sc->sc_inreset_cnt--;
2128 /* XXX only do this if sc_inreset_cnt == 0? */
2129 ath_hal_intrset(ah, sc->sc_imask);
2130 ATH_PCU_UNLOCK(sc);
2131
2132 /*
2133 * TX and RX can be started here. If it were started with
2134 * sc_inreset_cnt > 0, the TX and RX path would abort.
2135 * Thus if this is a nested call through the reset or
2136 * channel change code, TX completion will occur but
2137 * RX completion and ath_start / ath_tx_start will not
2138 * run.
2139 */
2140
2141 /* Restart TX/RX as needed */
2142 ath_txrx_start(sc);
2143
2144 /* XXX Restart TX completion and pending TX */
2145 if (reset_type == ATH_RESET_NOLOSS) {
2146 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2147 if (ATH_TXQ_SETUP(sc, i)) {
2148 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2149 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2150 ath_txq_sched(sc, &sc->sc_txq[i]);
2151 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2152 }
2153 }
2154 }
2155
2156 /*
2157 * This may have been set during an ath_start() call which
2158 * detected a concurrent TX was going on.
2159 * So, clear it.
2160 */
2161 IF_LOCK(&ifp->if_snd);
2162 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2163 IF_UNLOCK(&ifp->if_snd);
2164
2165 /* Handle any frames in the TX queue */
2166 /*
2167 * XXX should this be done by the caller, rather than
2168 * ath_reset() ?
2169 */
2170 ath_start(ifp); /* restart xmit */
2171 return 0;
2172}
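/*
 * Editor's note (a hedged usage sketch, not code from this file): the
 * reset type selects how much in-flight state survives.  The recovery
 * paths elsewhere in this file do, in essence:
 *
 *	ath_reset(ifp, ATH_RESET_NOLOSS);	process pending TX/RX
 *						completions first
 *	ath_reset(ifp, ATH_RESET_FULL);		full reset; pending
 *						frames may be flushed
 */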
2173
2174static int
2175ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2176{
2177 struct ieee80211com *ic = vap->iv_ic;
2178 struct ifnet *ifp = ic->ic_ifp;
2179 struct ath_softc *sc = ifp->if_softc;
2180 struct ath_hal *ah = sc->sc_ah;
2181
2182 switch (cmd) {
2183 case IEEE80211_IOC_TXPOWER:
2184 /*
2185 * If per-packet TPC is enabled, then we have nothing
2186 * to do; otherwise we need to force the global limit.
2187 * All this can happen directly; no need to reset.
2188 */
2189 if (!ath_hal_gettpc(ah))
2190 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2191 return 0;
2192 }
2193 /* XXX? Full or NOLOSS? */
2194 return ath_reset(ifp, ATH_RESET_FULL);
2195}
2196
2197struct ath_buf *
2198_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2199{
2200 struct ath_buf *bf;
2201
2202 ATH_TXBUF_LOCK_ASSERT(sc);
2203
2204 if (btype == ATH_BUFTYPE_MGMT)
2205 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2206 else
2207 bf = TAILQ_FIRST(&sc->sc_txbuf);
2208
2209 if (bf == NULL) {
2210 sc->sc_stats.ast_tx_getnobuf++;
2211 } else {
2212 if (bf->bf_flags & ATH_BUF_BUSY) {
2213 sc->sc_stats.ast_tx_getbusybuf++;
2214 bf = NULL;
2215 }
2216 }
2217
2218 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2219 if (btype == ATH_BUFTYPE_MGMT)
2220 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2221 else {
2222 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2223 sc->sc_txbuf_cnt--;
2224
2225 /*
2226 * This shouldn't happen; however just to be
2227 * safe print a warning and fudge the txbuf
2228 * count.
2229 */
2230 if (sc->sc_txbuf_cnt < 0) {
2231 device_printf(sc->sc_dev,
2232 "%s: sc_txbuf_cnt < 0?\n",
2233 __func__);
2234 sc->sc_txbuf_cnt = 0;
2235 }
2236 }
2237 } else
2238 bf = NULL;
2239
2240 if (bf == NULL) {
2241 /* XXX should check which list, mgmt or otherwise */
2242 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2243 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2244 "out of xmit buffers" : "xmit buffer busy"); 2245 return NULL; 2246 } 2247 2248 /* XXX TODO: should do this at buffer list initialisation */ 2249 /* XXX (then, ensure the buffer has the right flag set) */ 2250 if (btype == ATH_BUFTYPE_MGMT) 2251 bf->bf_flags |= ATH_BUF_MGMT; 2252 else 2253 bf->bf_flags &= (~ATH_BUF_MGMT); 2254 2255 /* Valid bf here; clear some basic fields */ 2256 bf->bf_next = NULL; /* XXX just to be sure */ 2257 bf->bf_last = NULL; /* XXX again, just to be sure */ 2258 bf->bf_comp = NULL; /* XXX again, just to be sure */ 2259 bzero(&bf->bf_state, sizeof(bf->bf_state)); 2260 2261 return bf; 2262} 2263 2264/* 2265 * When retrying a software frame, buffers marked ATH_BUF_BUSY 2266 * can't be thrown back on the queue as they could still be 2267 * in use by the hardware. 2268 * 2269 * This duplicates the buffer, or returns NULL. 2270 * 2271 * The descriptor is also copied but the link pointers and 2272 * the DMA segments aren't copied; this frame should thus 2273 * be again passed through the descriptor setup/chain routines 2274 * so the link is correct. 2275 * 2276 * The caller must free the buffer using ath_freebuf(). 2277 * 2278 * XXX TODO: this call shouldn't fail as it'll cause packet loss 2279 * XXX in the TX pathway when retries are needed. 2280 * XXX Figure out how to keep some buffers free, or factor the 2281 * XXX number of busy buffers into the xmit path (ath_start()) 2282 * XXX so we don't over-commit. 2283 */ 2284struct ath_buf * 2285ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf) 2286{ 2287 struct ath_buf *tbf; 2288 2289 tbf = ath_getbuf(sc, 2290 (bf->bf_flags & ATH_BUF_MGMT) ? 2291 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 2292 if (tbf == NULL) 2293 return NULL; /* XXX failure? Why? */ 2294 2295 /* Copy basics */ 2296 tbf->bf_next = NULL; 2297 tbf->bf_nseg = bf->bf_nseg; 2298 tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY; 2299 tbf->bf_status = bf->bf_status; 2300 tbf->bf_m = bf->bf_m; 2301 tbf->bf_node = bf->bf_node; 2302 /* will be setup by the chain/setup function */ 2303 tbf->bf_lastds = NULL; 2304 /* for now, last == self */ 2305 tbf->bf_last = tbf; 2306 tbf->bf_comp = bf->bf_comp; 2307 2308 /* NOTE: DMA segments will be setup by the setup/chain functions */ 2309 2310 /* The caller has to re-init the descriptor + links */ 2311 2312 /* Copy state */ 2313 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 2314 2315 return tbf; 2316} 2317 2318struct ath_buf * 2319ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 2320{ 2321 struct ath_buf *bf; 2322 2323 ATH_TXBUF_LOCK(sc); 2324 bf = _ath_getbuf_locked(sc, btype); 2325 /* 2326 * If a mgmt buffer was requested but we're out of those, 2327 * try requesting a normal one. 2328 */ 2329 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 2330 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 2331 ATH_TXBUF_UNLOCK(sc); 2332 if (bf == NULL) { 2333 struct ifnet *ifp = sc->sc_ifp; 2334 2335 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 2336 sc->sc_stats.ast_tx_qstop++; 2337 IF_LOCK(&ifp->if_snd); 2338 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2339 IF_UNLOCK(&ifp->if_snd); 2340 } 2341 return bf; 2342} 2343 2344void 2345ath_start(struct ifnet *ifp) 2346{ 2347 struct ath_softc *sc = ifp->if_softc; 2348 struct ieee80211_node *ni; 2349 struct ath_buf *bf; 2350 struct mbuf *m, *next; 2351 ath_bufhead frags; 2352 2353 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) 2354 return; 2355 2356 /* XXX is it ok to hold the ATH_LOCK here? 
*/
2357 ATH_PCU_LOCK(sc);
2358 if (sc->sc_inreset_cnt > 0) {
2359 device_printf(sc->sc_dev,
2360 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2361 ATH_PCU_UNLOCK(sc);
2362 IF_LOCK(&ifp->if_snd);
2363 sc->sc_stats.ast_tx_qstop++;
2364 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2365 IF_UNLOCK(&ifp->if_snd);
2366 return;
2367 }
2368 sc->sc_txstart_cnt++;
2369 ATH_PCU_UNLOCK(sc);
2370
2371 for (;;) {
2372 ATH_TXBUF_LOCK(sc);
2373 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
2374 /* XXX increment counter? */
2375 ATH_TXBUF_UNLOCK(sc);
2376 IF_LOCK(&ifp->if_snd);
2377 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2378 IF_UNLOCK(&ifp->if_snd);
2379 break;
2380 }
2381 ATH_TXBUF_UNLOCK(sc);
2382
2383 /*
2384 * Grab a TX buffer and associated resources.
2385 */
2386 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
2387 if (bf == NULL)
2388 break;
2389
2390 IFQ_DEQUEUE(&ifp->if_snd, m);
2391 if (m == NULL) {
2392 ATH_TXBUF_LOCK(sc);
2393 ath_returnbuf_head(sc, bf);
2394 ATH_TXBUF_UNLOCK(sc);
2395 break;
2396 }
2397 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2398 /*
2399 * Check for fragmentation. If this frame
2400 * has been broken up, verify we have enough
2401 * buffers to send all the fragments so all
2402 * go out or none...
2403 */
2404 TAILQ_INIT(&frags);
2405 if ((m->m_flags & M_FRAG) &&
2406 !ath_txfrag_setup(sc, &frags, m, ni)) {
2407 DPRINTF(sc, ATH_DEBUG_XMIT,
2408 "%s: out of txfrag buffers\n", __func__);
2409 sc->sc_stats.ast_tx_nofrag++;
2410 ifp->if_oerrors++;
2411 ath_freetx(m);
2412 goto bad;
2413 }
2414 ifp->if_opackets++;
2415 nextfrag:
2416 /*
2417 * Pass the frame to the h/w for transmission.
2418 * Fragmented frames have each frag chained together
2419 * with m_nextpkt. We know there are sufficient ath_buf's
2420 * to send all the frags because of work done by
2421 * ath_txfrag_setup. We leave m_nextpkt set while
2422 * calling ath_tx_start so it can use it to extend the
2423 * tx duration to cover the subsequent frag and
2424 * so it can reclaim all the mbufs in case of an error;
2425 * ath_tx_start clears m_nextpkt once it commits to
2426 * handing the frame to the hardware.
2427 */
2428 next = m->m_nextpkt;
2429 if (ath_tx_start(sc, ni, bf, m)) {
2430 bad:
2431 ifp->if_oerrors++;
2432 reclaim:
2433 bf->bf_m = NULL;
2434 bf->bf_node = NULL;
2435 ATH_TXBUF_LOCK(sc);
2436 ath_returnbuf_head(sc, bf);
2437 ath_txfrag_cleanup(sc, &frags, ni);
2438 ATH_TXBUF_UNLOCK(sc);
2439 if (ni != NULL)
2440 ieee80211_free_node(ni);
2441 continue;
2442 }
2443 if (next != NULL) {
2444 /*
2445 * Beware of state changing between frags.
2446 * XXX check sta power-save state?
2447 */
2448 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2449 DPRINTF(sc, ATH_DEBUG_XMIT,
2450 "%s: flush fragmented packet, state %s\n",
2451 __func__,
2452 ieee80211_state_name[ni->ni_vap->iv_state]);
2453 ath_freetx(next);
2454 goto reclaim;
2455 }
2456 m = next;
2457 bf = TAILQ_FIRST(&frags);
2458 KASSERT(bf != NULL, ("no buf for txfrag"));
2459 TAILQ_REMOVE(&frags, bf, bf_list);
2460 goto nextfrag;
2461 }
2462
2463 sc->sc_wd_timer = 5;
2464 }
2465
2466 ATH_PCU_LOCK(sc);
2467 sc->sc_txstart_cnt--;
2468 ATH_PCU_UNLOCK(sc);
2469}
2470
2471static int
2472ath_media_change(struct ifnet *ifp)
2473{
2474 int error = ieee80211_media_change(ifp);
2475 /* NB: only the fixed rate can change and that doesn't need a reset */
2476 return (error == ENETRESET ? 0 : error);
2477}
2478
2479/*
2480 * Block/unblock tx+rx processing while a key change is done.
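 *
 * (Editor's illustration, an assumed net80211 usage pattern rather than
 * code from this file:
 *
 *	ath_key_update_begin(vap);
 *	... modify the key cache ...
 *	ath_key_update_end(vap);
 *
 * so the block/unblock calls below must stay balanced.)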
2481 * We assume the caller serializes key management operations 2482 * so we only need to worry about synchronization with other 2483 * uses that originate in the driver. 2484 */ 2485static void 2486ath_key_update_begin(struct ieee80211vap *vap) 2487{ 2488 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2489 struct ath_softc *sc = ifp->if_softc; 2490 2491 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2492 taskqueue_block(sc->sc_tq); 2493 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ 2494} 2495 2496static void 2497ath_key_update_end(struct ieee80211vap *vap) 2498{ 2499 struct ifnet *ifp = vap->iv_ic->ic_ifp; 2500 struct ath_softc *sc = ifp->if_softc; 2501 2502 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 2503 IF_UNLOCK(&ifp->if_snd); 2504 taskqueue_unblock(sc->sc_tq); 2505} 2506 2507static void 2508ath_update_promisc(struct ifnet *ifp) 2509{ 2510 struct ath_softc *sc = ifp->if_softc; 2511 u_int32_t rfilt; 2512 2513 /* configure rx filter */ 2514 rfilt = ath_calcrxfilter(sc); 2515 ath_hal_setrxfilter(sc->sc_ah, rfilt); 2516 2517 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 2518} 2519 2520static void 2521ath_update_mcast(struct ifnet *ifp) 2522{ 2523 struct ath_softc *sc = ifp->if_softc; 2524 u_int32_t mfilt[2]; 2525 2526 /* calculate and install multicast filter */ 2527 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2528 struct ifmultiaddr *ifma; 2529 /* 2530 * Merge multicast addresses to form the hardware filter. 2531 */ 2532 mfilt[0] = mfilt[1] = 0; 2533 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ 2534 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2535 caddr_t dl; 2536 u_int32_t val; 2537 u_int8_t pos; 2538 2539 /* calculate XOR of eight 6bit values */ 2540 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2541 val = LE_READ_4(dl + 0); 2542 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2543 val = LE_READ_4(dl + 3); 2544 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 2545 pos &= 0x3f; 2546 mfilt[pos / 32] |= (1 << (pos % 32)); 2547 } 2548 if_maddr_runlock(ifp); 2549 } else 2550 mfilt[0] = mfilt[1] = ~0; 2551 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 2552 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 2553 __func__, mfilt[0], mfilt[1]); 2554} 2555 2556void 2557ath_mode_init(struct ath_softc *sc) 2558{ 2559 struct ifnet *ifp = sc->sc_ifp; 2560 struct ath_hal *ah = sc->sc_ah; 2561 u_int32_t rfilt; 2562 2563 /* configure rx filter */ 2564 rfilt = ath_calcrxfilter(sc); 2565 ath_hal_setrxfilter(ah, rfilt); 2566 2567 /* configure operational mode */ 2568 ath_hal_setopmode(ah); 2569 2570 /* handle any link-level address change */ 2571 ath_hal_setmac(ah, IF_LLADDR(ifp)); 2572 2573 /* calculate and install multicast filter */ 2574 ath_update_mcast(ifp); 2575} 2576 2577/* 2578 * Set the slot time based on the current setting. 
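 *
 * (Editor's note: the constants used below are the standard 802.11
 * slot times - HAL_SLOT_TIME_9 is the 9 usec ERP short slot and
 * HAL_SLOT_TIME_20 the 20 usec long slot - while half- and
 * quarter-rate channels use the stretched 13 and 21 usec values.)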
2579 */
2580void
2581ath_setslottime(struct ath_softc *sc)
2582{
2583 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2584 struct ath_hal *ah = sc->sc_ah;
2585 u_int usec;
2586
2587 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2588 usec = 13;
2589 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2590 usec = 21;
2591 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2592 /* honor short/long slot time only in 11g */
2593 /* XXX shouldn't honor on pure g or turbo g channel */
2594 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2595 usec = HAL_SLOT_TIME_9;
2596 else
2597 usec = HAL_SLOT_TIME_20;
2598 } else
2599 usec = HAL_SLOT_TIME_9;
2600
2601 DPRINTF(sc, ATH_DEBUG_RESET,
2602 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2603 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2604 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2605
2606 ath_hal_setslottime(ah, usec);
2607 sc->sc_updateslot = OK;
2608}
2609
2610/*
2611 * Callback from the 802.11 layer to update the
2612 * slot time based on the current setting.
2613 */
2614static void
2615ath_updateslot(struct ifnet *ifp)
2616{
2617 struct ath_softc *sc = ifp->if_softc;
2618 struct ieee80211com *ic = ifp->if_l2com;
2619
2620 /*
2621 * When not coordinating the BSS, change the hardware
2622 * immediately. For other operation we defer the change
2623 * until beacon updates have propagated to the stations.
2624 */
2625 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2626 ic->ic_opmode == IEEE80211_M_MBSS)
2627 sc->sc_updateslot = UPDATE;
2628 else
2629 ath_setslottime(sc);
2630}
2631
2632/*
2633 * Append the contents of src to dst; both queues
2634 * are assumed to be locked.
2635 */
2636void
2637ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2638{
2639
2640 ATH_TXQ_LOCK_ASSERT(dst);
2641 ATH_TXQ_LOCK_ASSERT(src);
2642
2643 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2644 dst->axq_link = src->axq_link;
2645 src->axq_link = NULL;
2646 dst->axq_depth += src->axq_depth;
2647 dst->axq_aggr_depth += src->axq_aggr_depth;
2648 src->axq_depth = 0;
2649 src->axq_aggr_depth = 0;
2650}
2651
2652/*
2653 * Reset the hardware, with no loss.
2654 *
2655 * This can't be used for a general case reset.
2656 */
2657static void
2658ath_reset_proc(void *arg, int pending)
2659{
2660 struct ath_softc *sc = arg;
2661 struct ifnet *ifp = sc->sc_ifp;
2662
2663#if 0
2664 if_printf(ifp, "%s: resetting\n", __func__);
2665#endif
2666 ath_reset(ifp, ATH_RESET_NOLOSS);
2667}
2668
2669/*
2670 * Reset the hardware after detecting beacons have stopped.
2671 */
2672static void
2673ath_bstuck_proc(void *arg, int pending)
2674{
2675 struct ath_softc *sc = arg;
2676 struct ifnet *ifp = sc->sc_ifp;
2677 uint32_t hangs = 0;
2678
2679 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
2680 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
2681
2682 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2683 sc->sc_bmisscount);
2684 sc->sc_stats.ast_bstuck++;
2685 /*
2686 * This assumes that there's no simultaneous channel mode change
2687 * occurring.
2688 */ 2689 ath_reset(ifp, ATH_RESET_NOLOSS); 2690} 2691 2692static void 2693ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2694{ 2695 bus_addr_t *paddr = (bus_addr_t*) arg; 2696 KASSERT(error == 0, ("error %u on bus_dma callback", error)); 2697 *paddr = segs->ds_addr; 2698} 2699 2700static int 2701ath_descdma_setup(struct ath_softc *sc, 2702 struct ath_descdma *dd, ath_bufhead *head, 2703 const char *name, int nbuf, int ndesc) 2704{ 2705#define DS2PHYS(_dd, _ds) \ 2706 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2707#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ 2708 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) 2709 struct ifnet *ifp = sc->sc_ifp; 2710 uint8_t *ds; 2711 struct ath_buf *bf; 2712 int i, bsize, error; 2713 int desc_len; 2714 2715 desc_len = sizeof(struct ath_desc); 2716 2717 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", 2718 __func__, name, nbuf, ndesc); 2719 2720 dd->dd_name = name; 2721 dd->dd_desc_len = desc_len * nbuf * ndesc; 2722 2723 /* 2724 * Merlin work-around: 2725 * Descriptors that cross the 4KB boundary can't be used. 2726 * Assume one skipped descriptor per 4KB page. 2727 */ 2728 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2729 int numdescpage = 4096 / (desc_len * ndesc); 2730 dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096; 2731 } 2732 2733 /* 2734 * Setup DMA descriptor area. 2735 */ 2736 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 2737 PAGE_SIZE, 0, /* alignment, bounds */ 2738 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2739 BUS_SPACE_MAXADDR, /* highaddr */ 2740 NULL, NULL, /* filter, filterarg */ 2741 dd->dd_desc_len, /* maxsize */ 2742 1, /* nsegments */ 2743 dd->dd_desc_len, /* maxsegsize */ 2744 BUS_DMA_ALLOCNOW, /* flags */ 2745 NULL, /* lockfunc */ 2746 NULL, /* lockarg */ 2747 &dd->dd_dmat); 2748 if (error != 0) { 2749 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); 2750 return error; 2751 } 2752 2753 /* allocate descriptors */ 2754 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); 2755 if (error != 0) { 2756 if_printf(ifp, "unable to create dmamap for %s descriptors, " 2757 "error %u\n", dd->dd_name, error); 2758 goto fail0; 2759 } 2760 2761 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, 2762 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, 2763 &dd->dd_dmamap); 2764 if (error != 0) { 2765 if_printf(ifp, "unable to alloc memory for %u %s descriptors, " 2766 "error %u\n", nbuf * ndesc, dd->dd_name, error); 2767 goto fail1; 2768 } 2769 2770 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, 2771 dd->dd_desc, dd->dd_desc_len, 2772 ath_load_cb, &dd->dd_desc_paddr, 2773 BUS_DMA_NOWAIT); 2774 if (error != 0) { 2775 if_printf(ifp, "unable to map %s descriptors, error %u\n", 2776 dd->dd_name, error); 2777 goto fail2; 2778 } 2779 2780 ds = (uint8_t *) dd->dd_desc; 2781 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", 2782 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, 2783 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); 2784 2785 /* allocate rx buffers */ 2786 bsize = sizeof(struct ath_buf) * nbuf; 2787 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); 2788 if (bf == NULL) { 2789 if_printf(ifp, "malloc of %s buffers failed, size %u\n", 2790 dd->dd_name, bsize); 2791 goto fail3; 2792 } 2793 dd->dd_bufptr = bf; 2794 2795 TAILQ_INIT(head); 2796 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * desc_len)) { 2797 bf->bf_desc = (struct ath_desc *) ds; 2798 bf->bf_daddr = 
DS2PHYS(dd, ds); 2799 if (! ath_hal_split4ktrans(sc->sc_ah)) { 2800 /* 2801 * Merlin WAR: Skip descriptor addresses which 2802 * cause 4KB boundary crossing along any point 2803 * in the descriptor. 2804 */ 2805 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, 2806 desc_len * ndesc)) { 2807 /* Start at the next page */ 2808 ds += 0x1000 - (bf->bf_daddr & 0xFFF); 2809 bf->bf_desc = (struct ath_desc *) ds; 2810 bf->bf_daddr = DS2PHYS(dd, ds); 2811 } 2812 } 2813 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 2814 &bf->bf_dmamap); 2815 if (error != 0) { 2816 if_printf(ifp, "unable to create dmamap for %s " 2817 "buffer %u, error %u\n", dd->dd_name, i, error); 2818 ath_descdma_cleanup(sc, dd, head); 2819 return error; 2820 } 2821 bf->bf_lastds = bf->bf_desc; /* Just an initial value */ 2822 TAILQ_INSERT_TAIL(head, bf, bf_list); 2823 } 2824 return 0; 2825fail3: 2826 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2827fail2: 2828 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2829fail1: 2830 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2831fail0: 2832 bus_dma_tag_destroy(dd->dd_dmat); 2833 memset(dd, 0, sizeof(*dd)); 2834 return error; 2835#undef DS2PHYS 2836#undef ATH_DESC_4KB_BOUND_CHECK 2837} 2838 2839static void 2840ath_descdma_cleanup(struct ath_softc *sc, 2841 struct ath_descdma *dd, ath_bufhead *head) 2842{ 2843 struct ath_buf *bf; 2844 struct ieee80211_node *ni; 2845 2846 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); 2847 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); 2848 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 2849 bus_dma_tag_destroy(dd->dd_dmat); 2850 2851 TAILQ_FOREACH(bf, head, bf_list) { 2852 if (bf->bf_m) { 2853 m_freem(bf->bf_m); 2854 bf->bf_m = NULL; 2855 } 2856 if (bf->bf_dmamap != NULL) { 2857 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); 2858 bf->bf_dmamap = NULL; 2859 } 2860 ni = bf->bf_node; 2861 bf->bf_node = NULL; 2862 if (ni != NULL) { 2863 /* 2864 * Reclaim node reference. 2865 */ 2866 ieee80211_free_node(ni); 2867 } 2868 } 2869 2870 TAILQ_INIT(head); 2871 free(dd->dd_bufptr, M_ATHDEV); 2872 memset(dd, 0, sizeof(*dd)); 2873} 2874 2875static int 2876ath_desc_alloc(struct ath_softc *sc) 2877{ 2878 int error; 2879 2880 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 2881 "rx", ath_rxbuf, 1); 2882 if (error != 0) 2883 return error; 2884 2885 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2886 "tx", ath_txbuf, ATH_TXDESC); 2887 if (error != 0) { 2888 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2889 return error; 2890 } 2891 sc->sc_txbuf_cnt = ath_txbuf; 2892 2893 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 2894 "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC); 2895 if (error != 0) { 2896 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2897 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2898 return error; 2899 } 2900 2901 /* 2902 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 2903 * flag doesn't have to be set in ath_getbuf_locked(). 
2904 */ 2905 2906 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 2907 "beacon", ATH_BCBUF, 1); 2908 if (error != 0) { 2909 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2910 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2911 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 2912 &sc->sc_txbuf_mgmt); 2913 return error; 2914 } 2915 return 0; 2916} 2917 2918static void 2919ath_desc_free(struct ath_softc *sc) 2920{ 2921 2922 if (sc->sc_bdma.dd_desc_len != 0) 2923 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 2924 if (sc->sc_txdma.dd_desc_len != 0) 2925 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 2926 if (sc->sc_rxdma.dd_desc_len != 0) 2927 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 2928 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 2929 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 2930 &sc->sc_txbuf_mgmt); 2931} 2932 2933static struct ieee80211_node * 2934ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2935{ 2936 struct ieee80211com *ic = vap->iv_ic; 2937 struct ath_softc *sc = ic->ic_ifp->if_softc; 2938 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 2939 struct ath_node *an; 2940 2941 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 2942 if (an == NULL) { 2943 /* XXX stat+msg */ 2944 return NULL; 2945 } 2946 ath_rate_node_init(sc, an); 2947 2948 /* Setup the mutex - there's no associd yet so set the name to NULL */ 2949 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 2950 device_get_nameunit(sc->sc_dev), an); 2951 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 2952 2953 /* XXX setup ath_tid */ 2954 ath_tx_tid_init(sc, an); 2955 2956 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); 2957 return &an->an_node; 2958} 2959 2960static void 2961ath_node_cleanup(struct ieee80211_node *ni) 2962{ 2963 struct ieee80211com *ic = ni->ni_ic; 2964 struct ath_softc *sc = ic->ic_ifp->if_softc; 2965 2966 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 2967 ath_tx_node_flush(sc, ATH_NODE(ni)); 2968 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 2969 sc->sc_node_cleanup(ni); 2970} 2971 2972static void 2973ath_node_free(struct ieee80211_node *ni) 2974{ 2975 struct ieee80211com *ic = ni->ni_ic; 2976 struct ath_softc *sc = ic->ic_ifp->if_softc; 2977 2978 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); 2979 mtx_destroy(&ATH_NODE(ni)->an_mtx); 2980 sc->sc_node_free(ni); 2981} 2982 2983static void 2984ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 2985{ 2986 struct ieee80211com *ic = ni->ni_ic; 2987 struct ath_softc *sc = ic->ic_ifp->if_softc; 2988 struct ath_hal *ah = sc->sc_ah; 2989 2990 *rssi = ic->ic_node_getrssi(ni); 2991 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 2992 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 2993 else 2994 *noise = -95; /* nominally correct */ 2995} 2996 2997/* 2998 * Set the default antenna. 
2999 */
3000void
3001ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3002{
3003 struct ath_hal *ah = sc->sc_ah;
3004
3005 /* XXX block beacon interrupts */
3006 ath_hal_setdefantenna(ah, antenna);
3007 if (sc->sc_defant != antenna)
3008 sc->sc_stats.ast_ant_defswitch++;
3009 sc->sc_defant = antenna;
3010 sc->sc_rxotherant = 0;
3011}
3012
3013static void
3014ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3015{
3016 txq->axq_qnum = qnum;
3017 txq->axq_ac = 0;
3018 txq->axq_depth = 0;
3019 txq->axq_aggr_depth = 0;
3020 txq->axq_intrcnt = 0;
3021 txq->axq_link = NULL;
3022 txq->axq_softc = sc;
3023 TAILQ_INIT(&txq->axq_q);
3024 TAILQ_INIT(&txq->axq_tidq);
3025 ATH_TXQ_LOCK_INIT(sc, txq);
3026}
3027
3028/*
3029 * Setup a h/w transmit queue.
3030 */
3031static struct ath_txq *
3032ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3033{
3034#define N(a) (sizeof(a)/sizeof(a[0]))
3035 struct ath_hal *ah = sc->sc_ah;
3036 HAL_TXQ_INFO qi;
3037 int qnum;
3038
3039 memset(&qi, 0, sizeof(qi));
3040 qi.tqi_subtype = subtype;
3041 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3042 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3043 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3044 /*
3045 * Enable interrupts only for EOL and DESC conditions.
3046 * We mark tx descriptors to receive a DESC interrupt
3047 * when a tx queue gets deep; otherwise waiting for the
3048 * EOL to reap descriptors. Note that this is done to
3049 * reduce interrupt load and this only defers reaping
3050 * descriptors, never transmitting frames. Aside from
3051 * reducing interrupts this also permits more concurrency.
3052 * The only potential downside is if the tx queue backs
3053 * up in which case the top half of the kernel may back up
3054 * due to a lack of tx descriptors.
3055 */
3056 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3057 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3058 if (qnum == -1) {
3059 /*
3060 * NB: don't print a message, this happens
3061 * normally on parts with too few tx queues
3062 */
3063 return NULL;
3064 }
3065 if (qnum >= N(sc->sc_txq)) {
3066 device_printf(sc->sc_dev,
3067 "hal qnum %u out of range, max %zu!\n",
3068 qnum, N(sc->sc_txq));
3069 ath_hal_releasetxqueue(ah, qnum);
3070 return NULL;
3071 }
3072 if (!ATH_TXQ_SETUP(sc, qnum)) {
3073 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3074 sc->sc_txqsetup |= 1<<qnum;
3075 }
3076 return &sc->sc_txq[qnum];
3077#undef N
3078}
3079
3080/*
3081 * Setup a hardware data transmit queue for the specified
3082 * access control. The hal may not support all requested
3083 * queues in which case it will return a reference to a
3084 * previously setup queue. We record the mapping from ac's
3085 * to h/w queues for use by ath_tx_start and also track
3086 * the set of h/w queues being used to optimize work in the
3087 * transmit interrupt handler and related routines.
3088 */
3089static int
3090ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3091{
3092#define N(a) (sizeof(a)/sizeof(a[0]))
3093 struct ath_txq *txq;
3094
3095 if (ac >= N(sc->sc_ac2q)) {
3096 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3097 ac, N(sc->sc_ac2q));
3098 return 0;
3099 }
3100 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3101 if (txq != NULL) {
3102 txq->axq_ac = ac;
3103 sc->sc_ac2q[ac] = txq;
3104 return 1;
3105 } else
3106 return 0;
3107#undef N
3108}
3109
3110/*
3111 * Update WME parameters for a transmit queue.
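 *
 * (Editor's worked example of the conversions used below: with
 * wmep_logcwmin = 4, ATH_EXPONENT_TO_VALUE(4) = (1 << 4) - 1 = 15
 * slots, and since TXOP limits are expressed in 32 usec units,
 * ATH_TXOP_TO_US(94) = 94 << 5 = 3008 usec.)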
3112 */ 3113static int 3114ath_txq_update(struct ath_softc *sc, int ac) 3115{ 3116#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3117#define ATH_TXOP_TO_US(v) (v<<5) 3118 struct ifnet *ifp = sc->sc_ifp; 3119 struct ieee80211com *ic = ifp->if_l2com; 3120 struct ath_txq *txq = sc->sc_ac2q[ac]; 3121 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3122 struct ath_hal *ah = sc->sc_ah; 3123 HAL_TXQ_INFO qi; 3124 3125 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3126#ifdef IEEE80211_SUPPORT_TDMA 3127 if (sc->sc_tdma) { 3128 /* 3129 * AIFS is zero so there's no pre-transmit wait. The 3130 * burst time defines the slot duration and is configured 3131 * through net80211. The QCU is setup to not do post-xmit 3132 * back off, lockout all lower-priority QCU's, and fire 3133 * off the DMA beacon alert timer which is setup based 3134 * on the slot configuration. 3135 */ 3136 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3137 | HAL_TXQ_TXERRINT_ENABLE 3138 | HAL_TXQ_TXURNINT_ENABLE 3139 | HAL_TXQ_TXEOLINT_ENABLE 3140 | HAL_TXQ_DBA_GATED 3141 | HAL_TXQ_BACKOFF_DISABLE 3142 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3143 ; 3144 qi.tqi_aifs = 0; 3145 /* XXX +dbaprep? */ 3146 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3147 qi.tqi_burstTime = qi.tqi_readyTime; 3148 } else { 3149#endif 3150 /* 3151 * XXX shouldn't this just use the default flags 3152 * used in the previous queue setup? 3153 */ 3154 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3155 | HAL_TXQ_TXERRINT_ENABLE 3156 | HAL_TXQ_TXDESCINT_ENABLE 3157 | HAL_TXQ_TXURNINT_ENABLE 3158 | HAL_TXQ_TXEOLINT_ENABLE 3159 ; 3160 qi.tqi_aifs = wmep->wmep_aifsn; 3161 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3162 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3163 qi.tqi_readyTime = 0; 3164 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); 3165#ifdef IEEE80211_SUPPORT_TDMA 3166 } 3167#endif 3168 3169 DPRINTF(sc, ATH_DEBUG_RESET, 3170 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3171 __func__, txq->axq_qnum, qi.tqi_qflags, 3172 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3173 3174 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3175 if_printf(ifp, "unable to update hardware queue " 3176 "parameters for %s traffic!\n", 3177 ieee80211_wme_acnames[ac]); 3178 return 0; 3179 } else { 3180 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 3181 return 1; 3182 } 3183#undef ATH_TXOP_TO_US 3184#undef ATH_EXPONENT_TO_VALUE 3185} 3186 3187/* 3188 * Callback from the 802.11 layer to update WME parameters. 3189 */ 3190int 3191ath_wme_update(struct ieee80211com *ic) 3192{ 3193 struct ath_softc *sc = ic->ic_ifp->if_softc; 3194 3195 return !ath_txq_update(sc, WME_AC_BE) || 3196 !ath_txq_update(sc, WME_AC_BK) || 3197 !ath_txq_update(sc, WME_AC_VI) || 3198 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 3199} 3200 3201/* 3202 * Reclaim resources for a setup queue. 3203 */ 3204static void 3205ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 3206{ 3207 3208 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 3209 ATH_TXQ_LOCK_DESTROY(txq); 3210 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 3211} 3212 3213/* 3214 * Reclaim all tx queue resources. 3215 */ 3216static void 3217ath_tx_cleanup(struct ath_softc *sc) 3218{ 3219 int i; 3220 3221 ATH_TXBUF_LOCK_DESTROY(sc); 3222 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3223 if (ATH_TXQ_SETUP(sc, i)) 3224 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 3225} 3226 3227/* 3228 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 3229 * using the current rates in sc_rixmap. 
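 *
 * (Editor's example: net80211 rates are in 0.5 Mb/s units, so a lookup
 * of 54 Mb/s would be ath_tx_findrix(sc, 2 * 54); an 0xff entry in
 * sc_rixmap means the rate is absent and rix 0 is returned instead.)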
3230 */
3231int
3232ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3233{
3234 int rix = sc->sc_rixmap[rate];
3235 /* NB: return lowest rix for invalid rate */
3236 return (rix == 0xff ? 0 : rix);
3237}
3238
3239static void
3240ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
3241 struct ath_buf *bf)
3242{
3243 struct ieee80211_node *ni = bf->bf_node;
3244 struct ifnet *ifp = sc->sc_ifp;
3245 struct ieee80211com *ic = ifp->if_l2com;
3246 int sr, lr, pri;
3247
3248 if (ts->ts_status == 0) {
3249 u_int8_t txant = ts->ts_antenna;
3250 sc->sc_stats.ast_ant_tx[txant]++;
3251 sc->sc_ant_tx[txant]++;
3252 if (ts->ts_finaltsi != 0)
3253 sc->sc_stats.ast_tx_altrate++;
3254 pri = M_WME_GETAC(bf->bf_m);
3255 if (pri >= WME_AC_VO)
3256 ic->ic_wme.wme_hipri_traffic++;
3257 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
3258 ni->ni_inact = ni->ni_inact_reload;
3259 } else {
3260 if (ts->ts_status & HAL_TXERR_XRETRY)
3261 sc->sc_stats.ast_tx_xretries++;
3262 if (ts->ts_status & HAL_TXERR_FIFO)
3263 sc->sc_stats.ast_tx_fifoerr++;
3264 if (ts->ts_status & HAL_TXERR_FILT)
3265 sc->sc_stats.ast_tx_filtered++;
3266 if (ts->ts_status & HAL_TXERR_XTXOP)
3267 sc->sc_stats.ast_tx_xtxop++;
3268 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3269 sc->sc_stats.ast_tx_timerexpired++;
3270
3271 if (ts->ts_status & HAL_TX_DATA_UNDERRUN)
3272 sc->sc_stats.ast_tx_data_underrun++;
3273 if (ts->ts_status & HAL_TX_DELIM_UNDERRUN)
3274 sc->sc_stats.ast_tx_delim_underrun++;
3275
3276 if (bf->bf_m->m_flags & M_FF)
3277 sc->sc_stats.ast_ff_txerr++;
3278 }
3279 /* XXX when is this valid? */
3280 if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
3281 sc->sc_stats.ast_tx_desccfgerr++;
3282
3283 sr = ts->ts_shortretry;
3284 lr = ts->ts_longretry;
3285 sc->sc_stats.ast_tx_shortretry += sr;
3286 sc->sc_stats.ast_tx_longretry += lr;
3287
3288}
3289
3290/*
3291 * The default completion. If fail is 1, this means
3292 * "please don't retry the frame, and just return -1 status"
3293 * to the net80211 stack.
3294 */
3295void
3296ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
3297{
3298 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
3299 int st;
3300
3301 if (fail == 1)
3302 st = -1;
3303 else
3304 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
3305 ts->ts_status : HAL_TXERR_XRETRY;
3306
3307 if (bf->bf_state.bfs_dobaw)
3308 device_printf(sc->sc_dev,
3309 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
3310 __func__,
3311 bf,
3312 SEQNO(bf->bf_state.bfs_seqno));
3313 if (bf->bf_next != NULL)
3314 device_printf(sc->sc_dev,
3315 "%s: bf %p: seqno %d: bf_next not NULL!\n",
3316 __func__,
3317 bf,
3318 SEQNO(bf->bf_state.bfs_seqno));
3319
3320 /*
3321 * Do any tx complete callback. Note this must
3322 * be done before releasing the node reference.
3323 * This will free the mbuf, release the net80211
3324 * node and recycle the ath_buf.
3325 */
3326 ath_tx_freebuf(sc, bf, st);
3327}
3328
3329/*
3330 * Update rate control with the given completion status.
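 *
 * (Editor's note: for a plain frame assumed to be a non-aggregate, the
 * caller passes frmlen = bfs_pktlen, nframes = 1 and
 * nbad = (ts->ts_status == 0 ? 0 : 1), as ath_tx_processq() does
 * below; aggregate completion paths supply their own counts.)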
3331 */ 3332void 3333ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 3334 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 3335 int nframes, int nbad) 3336{ 3337 struct ath_node *an; 3338 3339 /* Only for unicast frames */ 3340 if (ni == NULL) 3341 return; 3342 3343 an = ATH_NODE(ni); 3344 3345 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 3346 ATH_NODE_LOCK(an); 3347 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 3348 ATH_NODE_UNLOCK(an); 3349 } 3350} 3351 3352/* 3353 * Update the busy status of the last frame on the free list. 3354 * When doing TDMA, the busy flag tracks whether the hardware 3355 * currently points to this buffer or not, and thus gated DMA 3356 * may restart by re-reading the last descriptor in this 3357 * buffer. 3358 * 3359 * This should be called in the completion function once one 3360 * of the buffers has been used. 3361 */ 3362static void 3363ath_tx_update_busy(struct ath_softc *sc) 3364{ 3365 struct ath_buf *last; 3366 3367 /* 3368 * Since the last frame may still be marked 3369 * as ATH_BUF_BUSY, unmark it here before 3370 * finishing the frame processing. 3371 * Since we've completed a frame (aggregate 3372 * or otherwise), the hardware has moved on 3373 * and is no longer referencing the previous 3374 * descriptor. 3375 */ 3376 ATH_TXBUF_LOCK_ASSERT(sc); 3377 last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s); 3378 if (last != NULL) 3379 last->bf_flags &= ~ATH_BUF_BUSY; 3380 last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s); 3381 if (last != NULL) 3382 last->bf_flags &= ~ATH_BUF_BUSY; 3383} 3384 3385/* 3386 * Process completed xmit descriptors from the specified queue. 3387 * Kick the packet scheduler if needed. This can occur from this 3388 * particular task. 3389 */ 3390static int 3391ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) 3392{ 3393 struct ath_hal *ah = sc->sc_ah; 3394 struct ath_buf *bf; 3395 struct ath_desc *ds; 3396 struct ath_tx_status *ts; 3397 struct ieee80211_node *ni; 3398 struct ath_node *an; 3399#ifdef IEEE80211_SUPPORT_SUPERG 3400 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3401#endif /* IEEE80211_SUPPORT_SUPERG */ 3402 int nacked; 3403 HAL_STATUS status; 3404 3405 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", 3406 __func__, txq->axq_qnum, 3407 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), 3408 txq->axq_link); 3409 nacked = 0; 3410 for (;;) { 3411 ATH_TXQ_LOCK(txq); 3412 txq->axq_intrcnt = 0; /* reset periodic desc intr count */ 3413 bf = TAILQ_FIRST(&txq->axq_q); 3414 if (bf == NULL) { 3415 ATH_TXQ_UNLOCK(txq); 3416 break; 3417 } 3418 ds = bf->bf_lastds; /* XXX must be setup correctly! */ 3419 ts = &bf->bf_status.ds_txstat; 3420 status = ath_hal_txprocdesc(ah, ds, ts); 3421#ifdef ATH_DEBUG 3422 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) 3423 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3424 status == HAL_OK); 3425 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) { 3426 ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 3427 status == HAL_OK); 3428 } 3429#endif 3430 if (status == HAL_EINPROGRESS) { 3431 ATH_TXQ_UNLOCK(txq); 3432 break; 3433 } 3434 ATH_TXQ_REMOVE(txq, bf, bf_list); 3435#ifdef IEEE80211_SUPPORT_TDMA 3436 if (txq->axq_depth > 0) { 3437 /* 3438 * More frames follow. Mark the buffer busy 3439 * so it's not re-used while the hardware may 3440 * still re-read the link field in the descriptor. 
3441 * 3442 * Use the last buffer in an aggregate as that 3443 * is where the hardware may be - intermediate 3444 * descriptors won't be "busy". 3445 */ 3446 bf->bf_last->bf_flags |= ATH_BUF_BUSY; 3447 } else 3448#else 3449 if (txq->axq_depth == 0) 3450#endif 3451 txq->axq_link = NULL; 3452 if (bf->bf_state.bfs_aggr) 3453 txq->axq_aggr_depth--; 3454 3455 ni = bf->bf_node; 3456 /* 3457 * If unicast frame was ack'd update RSSI, 3458 * including the last rx time used to 3459 * workaround phantom bmiss interrupts. 3460 */ 3461 if (ni != NULL && ts->ts_status == 0 && 3462 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 3463 nacked++; 3464 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 3465 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 3466 ts->ts_rssi); 3467 } 3468 ATH_TXQ_UNLOCK(txq); 3469 3470 /* If unicast frame, update general statistics */ 3471 if (ni != NULL) { 3472 an = ATH_NODE(ni); 3473 /* update statistics */ 3474 ath_tx_update_stats(sc, ts, bf); 3475 } 3476 3477 /* 3478 * Call the completion handler. 3479 * The completion handler is responsible for 3480 * calling the rate control code. 3481 * 3482 * Frames with no completion handler get the 3483 * rate control code called here. 3484 */ 3485 if (bf->bf_comp == NULL) { 3486 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 3487 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 3488 /* 3489 * XXX assume this isn't an aggregate 3490 * frame. 3491 */ 3492 ath_tx_update_ratectrl(sc, ni, 3493 bf->bf_state.bfs_rc, ts, 3494 bf->bf_state.bfs_pktlen, 1, 3495 (ts->ts_status == 0 ? 0 : 1)); 3496 } 3497 ath_tx_default_comp(sc, bf, 0); 3498 } else 3499 bf->bf_comp(sc, bf, 0); 3500 } 3501#ifdef IEEE80211_SUPPORT_SUPERG 3502 /* 3503 * Flush fast-frame staging queue when traffic slows. 3504 */ 3505 if (txq->axq_depth <= 1) 3506 ieee80211_ff_flush(ic, txq->axq_ac); 3507#endif 3508 3509 /* Kick the TXQ scheduler */ 3510 if (dosched) { 3511 ATH_TXQ_LOCK(txq); 3512 ath_txq_sched(sc, txq); 3513 ATH_TXQ_UNLOCK(txq); 3514 } 3515 3516 return nacked; 3517} 3518 3519#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 3520 3521/* 3522 * Deferred processing of transmit interrupt; special-cased 3523 * for a single hardware transmit queue (e.g. 5210 and 5211). 3524 */ 3525static void 3526ath_tx_proc_q0(void *arg, int npending) 3527{ 3528 struct ath_softc *sc = arg; 3529 struct ifnet *ifp = sc->sc_ifp; 3530 uint32_t txqs; 3531 3532 ATH_PCU_LOCK(sc); 3533 sc->sc_txproc_cnt++; 3534 txqs = sc->sc_txq_active; 3535 sc->sc_txq_active &= ~txqs; 3536 ATH_PCU_UNLOCK(sc); 3537 3538 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 3539 /* XXX why is lastrx updated in tx code? */ 3540 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3541 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3542 ath_tx_processq(sc, sc->sc_cabq, 1); 3543 IF_LOCK(&ifp->if_snd); 3544 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3545 IF_UNLOCK(&ifp->if_snd); 3546 sc->sc_wd_timer = 0; 3547 3548 if (sc->sc_softled) 3549 ath_led_event(sc, sc->sc_txrix); 3550 3551 ATH_PCU_LOCK(sc); 3552 sc->sc_txproc_cnt--; 3553 ATH_PCU_UNLOCK(sc); 3554 3555 ath_tx_kick(sc); 3556} 3557 3558/* 3559 * Deferred processing of transmit interrupt; special-cased 3560 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 
3561 */ 3562static void 3563ath_tx_proc_q0123(void *arg, int npending) 3564{ 3565 struct ath_softc *sc = arg; 3566 struct ifnet *ifp = sc->sc_ifp; 3567 int nacked; 3568 uint32_t txqs; 3569 3570 ATH_PCU_LOCK(sc); 3571 sc->sc_txproc_cnt++; 3572 txqs = sc->sc_txq_active; 3573 sc->sc_txq_active &= ~txqs; 3574 ATH_PCU_UNLOCK(sc); 3575 3576 /* 3577 * Process each active queue. 3578 */ 3579 nacked = 0; 3580 if (TXQACTIVE(txqs, 0)) 3581 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 3582 if (TXQACTIVE(txqs, 1)) 3583 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 3584 if (TXQACTIVE(txqs, 2)) 3585 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 3586 if (TXQACTIVE(txqs, 3)) 3587 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 3588 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 3589 ath_tx_processq(sc, sc->sc_cabq, 1); 3590 if (nacked) 3591 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3592 3593 IF_LOCK(&ifp->if_snd); 3594 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3595 IF_UNLOCK(&ifp->if_snd); 3596 sc->sc_wd_timer = 0; 3597 3598 if (sc->sc_softled) 3599 ath_led_event(sc, sc->sc_txrix); 3600 3601 ATH_PCU_LOCK(sc); 3602 sc->sc_txproc_cnt--; 3603 ATH_PCU_UNLOCK(sc); 3604 3605 ath_tx_kick(sc); 3606} 3607 3608/* 3609 * Deferred processing of transmit interrupt. 3610 */ 3611static void 3612ath_tx_proc(void *arg, int npending) 3613{ 3614 struct ath_softc *sc = arg; 3615 struct ifnet *ifp = sc->sc_ifp; 3616 int i, nacked; 3617 uint32_t txqs; 3618 3619 ATH_PCU_LOCK(sc); 3620 sc->sc_txproc_cnt++; 3621 txqs = sc->sc_txq_active; 3622 sc->sc_txq_active &= ~txqs; 3623 ATH_PCU_UNLOCK(sc); 3624 3625 /* 3626 * Process each active queue. 3627 */ 3628 nacked = 0; 3629 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 3630 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 3631 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 3632 if (nacked) 3633 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 3634 3635 /* XXX check this inside of IF_LOCK? */ 3636 IF_LOCK(&ifp->if_snd); 3637 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3638 IF_UNLOCK(&ifp->if_snd); 3639 sc->sc_wd_timer = 0; 3640 3641 if (sc->sc_softled) 3642 ath_led_event(sc, sc->sc_txrix); 3643 3644 ATH_PCU_LOCK(sc); 3645 sc->sc_txproc_cnt--; 3646 ATH_PCU_UNLOCK(sc); 3647 3648 ath_tx_kick(sc); 3649} 3650#undef TXQACTIVE 3651 3652/* 3653 * Deferred processing of TXQ rescheduling. 3654 */ 3655static void 3656ath_txq_sched_tasklet(void *arg, int npending) 3657{ 3658 struct ath_softc *sc = arg; 3659 int i; 3660 3661 /* XXX is skipping ok? 
*/ 3662 ATH_PCU_LOCK(sc); 3663#if 0 3664 if (sc->sc_inreset_cnt > 0) { 3665 device_printf(sc->sc_dev, 3666 "%s: sc_inreset_cnt > 0; skipping\n", __func__); 3667 ATH_PCU_UNLOCK(sc); 3668 return; 3669 } 3670#endif 3671 sc->sc_txproc_cnt++; 3672 ATH_PCU_UNLOCK(sc); 3673 3674 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 3675 if (ATH_TXQ_SETUP(sc, i)) { 3676 ATH_TXQ_LOCK(&sc->sc_txq[i]); 3677 ath_txq_sched(sc, &sc->sc_txq[i]); 3678 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 3679 } 3680 } 3681 3682 ATH_PCU_LOCK(sc); 3683 sc->sc_txproc_cnt--; 3684 ATH_PCU_UNLOCK(sc); 3685} 3686 3687void 3688ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) 3689{ 3690 3691 ATH_TXBUF_LOCK_ASSERT(sc); 3692 3693 if (bf->bf_flags & ATH_BUF_MGMT) 3694 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); 3695 else { 3696 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); 3697 sc->sc_txbuf_cnt++; 3698 if (sc->sc_txbuf_cnt > ath_txbuf) { 3699 device_printf(sc->sc_dev, 3700 "%s: sc_txbuf_cnt > %d?\n", 3701 __func__, 3702 ath_txbuf); 3703 sc->sc_txbuf_cnt = ath_txbuf; 3704 } 3705 } 3706} 3707 3708void 3709ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) 3710{ 3711 3712 ATH_TXBUF_LOCK_ASSERT(sc); 3713 3714 if (bf->bf_flags & ATH_BUF_MGMT) 3715 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); 3716 else { 3717 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); 3718 sc->sc_txbuf_cnt++; 3719 if (sc->sc_txbuf_cnt > ATH_TXBUF) { 3720 device_printf(sc->sc_dev, 3721 "%s: sc_txbuf_cnt > %d?\n", 3722 __func__, 3723 ATH_TXBUF); 3724 sc->sc_txbuf_cnt = ATH_TXBUF; 3725 } 3726 } 3727} 3728 3729/* 3730 * Return a buffer to the pool and update the 'busy' flag on the 3731 * previous 'tail' entry. 3732 * 3733 * This _must_ only be called when the buffer is involved in a completed 3734 * TX. The logic is that if it was part of an active TX, the previous 3735 * buffer on the list is now not involved in a halted TX DMA queue, waiting 3736 * for restart (eg for TDMA.) 3737 * 3738 * The caller must free the mbuf and recycle the node reference. 3739 */ 3740void 3741ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 3742{ 3743 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3744 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); 3745 3746 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 3747 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 3748 3749 ATH_TXBUF_LOCK(sc); 3750 ath_tx_update_busy(sc); 3751 ath_returnbuf_tail(sc, bf); 3752 ATH_TXBUF_UNLOCK(sc); 3753} 3754 3755/* 3756 * This is currently used by ath_tx_draintxq() and 3757 * ath_tx_tid_free_pkts(). 3758 * 3759 * It recycles a single ath_buf. 3760 */ 3761void 3762ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 3763{ 3764 struct ieee80211_node *ni = bf->bf_node; 3765 struct mbuf *m0 = bf->bf_m; 3766 3767 bf->bf_node = NULL; 3768 bf->bf_m = NULL; 3769 3770 /* Free the buffer, it's not needed any longer */ 3771 ath_freebuf(sc, bf); 3772 3773 if (ni != NULL) { 3774 /* 3775 * Do any callback and reclaim the node reference. 3776 */ 3777 if (m0->m_flags & M_TXCB) 3778 ieee80211_process_callback(ni, m0, status); 3779 ieee80211_free_node(ni); 3780 } 3781 m_freem(m0); 3782 3783 /* 3784 * XXX the buffer used to be freed -after-, but the DMA map was 3785 * freed where ath_freebuf() now is. I've no idea what this 3786 * will do. 
3790void
3791ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
3792{
3793#ifdef ATH_DEBUG
3794 struct ath_hal *ah = sc->sc_ah;
3795#endif
3796 struct ath_buf *bf;
3797 u_int ix;
3798
3799 /*
3800 * NB: this assumes output has been stopped and
3801 * we do not need to block ath_tx_proc
3802 */
3803 ATH_TXBUF_LOCK(sc);
3804 bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
3805 if (bf != NULL)
3806 bf->bf_flags &= ~ATH_BUF_BUSY;
3807 bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
3808 if (bf != NULL)
3809 bf->bf_flags &= ~ATH_BUF_BUSY;
3810 ATH_TXBUF_UNLOCK(sc);
3811
3812 for (ix = 0;; ix++) {
3813 ATH_TXQ_LOCK(txq);
3814 bf = TAILQ_FIRST(&txq->axq_q);
3815 if (bf == NULL) {
3816 txq->axq_link = NULL;
3817 ATH_TXQ_UNLOCK(txq);
3818 break;
3819 }
3820 ATH_TXQ_REMOVE(txq, bf, bf_list);
3821 if (bf->bf_state.bfs_aggr)
3822 txq->axq_aggr_depth--;
3823#ifdef ATH_DEBUG
3824 if (sc->sc_debug & ATH_DEBUG_RESET) {
3825 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3826
3827 ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
3828 ath_hal_txprocdesc(ah, bf->bf_lastds,
3829 &bf->bf_status.ds_txstat) == HAL_OK);
3830 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
3831 bf->bf_m->m_len, 0, -1);
3832 }
3833#endif /* ATH_DEBUG */
3834 /*
3835 * Since we're now doing magic in the completion
3836 * functions, we -must- call it for aggregation
3837 * destinations or BAW tracking will get upset.
3838 */
3839 /*
3840 * Clear ATH_BUF_BUSY; the completion handler
3841 * will free the buffer.
3842 */
3843 ATH_TXQ_UNLOCK(txq);
3844 bf->bf_flags &= ~ATH_BUF_BUSY;
3845 if (bf->bf_comp)
3846 bf->bf_comp(sc, bf, 1);
3847 else
3848 ath_tx_default_comp(sc, bf, 1);
3849 }
3850
3851 /*
3852 * Drain software queued frames which are on
3853 * active TIDs.
3854 */
3855 ath_tx_txq_drain(sc, txq);
3856}
3857
3858static void
3859ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
3860{
3861 struct ath_hal *ah = sc->sc_ah;
3862
3863 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
3864 __func__, txq->axq_qnum,
3865 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
3866 txq->axq_link);
3867 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
3868}
3869
3870static int
3871ath_stoptxdma(struct ath_softc *sc)
3872{
3873 struct ath_hal *ah = sc->sc_ah;
3874 int i;
3875
3876 /* XXX return value */
3877 if (sc->sc_invalid) {
3878 /* don't touch the hardware if marked invalid */
3879 return 0;
3880 }
3881
3882 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
3883 __func__, sc->sc_bhalq,
3884 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
3885 NULL);
3886 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
3887
3888 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3889 if (ATH_TXQ_SETUP(sc, i))
3890 ath_tx_stopdma(sc, &sc->sc_txq[i]);
3891
3892 return 1;
3893}
3894
3895/*
3896 * Drain the transmit queues and reclaim resources.
3897 */
3898static void
3899ath_draintxq(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
3900{
3901#ifdef ATH_DEBUG
3902 struct ath_hal *ah = sc->sc_ah;
3903#endif
3904 struct ifnet *ifp = sc->sc_ifp;
3905 int i;
3906
3907 (void) ath_stoptxdma(sc);
3908
3909 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
3910 /*
3911 * XXX TODO: should we just handle the completed TX frames
3912 * here, whether or not the reset is a full one or not?
3913 */
3914 if (ATH_TXQ_SETUP(sc, i)) {
3915 if (reset_type == ATH_RESET_NOLOSS)
3916 ath_tx_processq(sc, &sc->sc_txq[i], 0);
3917 else
3918 ath_tx_draintxq(sc, &sc->sc_txq[i]);
3919 }
3920 }
3921#ifdef ATH_DEBUG
3922 if (sc->sc_debug & ATH_DEBUG_RESET) {
3923 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
3924 if (bf != NULL && bf->bf_m != NULL) {
3925 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
3926 ath_hal_txprocdesc(ah, bf->bf_lastds,
3927 &bf->bf_status.ds_txstat) == HAL_OK);
3928 ieee80211_dump_pkt(ifp->if_l2com,
3929 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
3930 0, -1);
3931 }
3932 }
3933#endif /* ATH_DEBUG */
3934 IF_LOCK(&ifp->if_snd);
3935 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3936 IF_UNLOCK(&ifp->if_snd);
3937 sc->sc_wd_timer = 0;
3938}
3939
3940/*
3941 * Update internal state after a channel change.
3942 */
3943static void
3944ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
3945{
3946 enum ieee80211_phymode mode;
3947
3948 /*
3949 * Change channels and update the h/w rate map
3950 * if we're switching; e.g. 11a to 11b/g.
3951 */
3952 mode = ieee80211_chan2mode(chan);
3953 if (mode != sc->sc_curmode)
3954 ath_setcurmode(sc, mode);
3955 sc->sc_curchan = chan;
3956}
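/*
 * Example (illustrative only): when moving from an 11a channel to an
 * 11g channel, ieee80211_chan2mode() returns IEEE80211_MODE_11G, so
 * ath_chan_change() above re-installs the h/w rate map:
 *
 *	mode = ieee80211_chan2mode(chan);	(-> IEEE80211_MODE_11G)
 *	if (mode != sc->sc_curmode)		(was IEEE80211_MODE_11A)
 *		ath_setcurmode(sc, mode);	(sc_currates = sc_rates[mode])
 */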
4028 */ 4029 if (ath_startrecv(sc) != 0) { 4030 if_printf(ifp, "%s: unable to restart recv logic\n", 4031 __func__); 4032 ret = EIO; 4033 goto finish; 4034 } 4035 4036 /* 4037 * Change channels and update the h/w rate map 4038 * if we're switching; e.g. 11a to 11b/g. 4039 */ 4040 ath_chan_change(sc, chan); 4041 4042 /* 4043 * Reset clears the beacon timers; reset them 4044 * here if needed. 4045 */ 4046 if (sc->sc_beacons) { /* restart beacons */ 4047#ifdef IEEE80211_SUPPORT_TDMA 4048 if (sc->sc_tdma) 4049 ath_tdma_config(sc, NULL); 4050 else 4051#endif 4052 ath_beacon_config(sc, NULL); 4053 } 4054 4055 /* 4056 * Re-enable interrupts. 4057 */ 4058#if 0 4059 ath_hal_intrset(ah, sc->sc_imask); 4060#endif 4061 } 4062 4063finish: 4064 ATH_PCU_LOCK(sc); 4065 sc->sc_inreset_cnt--; 4066 /* XXX only do this if sc_inreset_cnt == 0? */ 4067 ath_hal_intrset(ah, sc->sc_imask); 4068 ATH_PCU_UNLOCK(sc); 4069 4070 IF_LOCK(&ifp->if_snd); 4071 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4072 IF_UNLOCK(&ifp->if_snd); 4073 ath_txrx_start(sc); 4074 /* XXX ath_start? */ 4075 4076 return ret; 4077} 4078 4079/* 4080 * Periodically recalibrate the PHY to account 4081 * for temperature/environment changes. 4082 */ 4083static void 4084ath_calibrate(void *arg) 4085{ 4086 struct ath_softc *sc = arg; 4087 struct ath_hal *ah = sc->sc_ah; 4088 struct ifnet *ifp = sc->sc_ifp; 4089 struct ieee80211com *ic = ifp->if_l2com; 4090 HAL_BOOL longCal, isCalDone; 4091 HAL_BOOL aniCal, shortCal = AH_FALSE; 4092 int nextcal; 4093 4094 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 4095 goto restart; 4096 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 4097 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 4098 if (sc->sc_doresetcal) 4099 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 4100 4101 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 4102 if (aniCal) { 4103 sc->sc_stats.ast_ani_cal++; 4104 sc->sc_lastani = ticks; 4105 ath_hal_ani_poll(ah, sc->sc_curchan); 4106 } 4107 4108 if (longCal) { 4109 sc->sc_stats.ast_per_cal++; 4110 sc->sc_lastlongcal = ticks; 4111 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 4112 /* 4113 * Rfgain is out of bounds, reset the chip 4114 * to load new gain values. 4115 */ 4116 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 4117 "%s: rfgain change\n", __func__); 4118 sc->sc_stats.ast_per_rfgain++; 4119 sc->sc_resetcal = 0; 4120 sc->sc_doresetcal = AH_TRUE; 4121 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 4122 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 4123 return; 4124 } 4125 /* 4126 * If this long cal is after an idle period, then 4127 * reset the data collection state so we start fresh. 4128 */ 4129 if (sc->sc_resetcal) { 4130 (void) ath_hal_calreset(ah, sc->sc_curchan); 4131 sc->sc_lastcalreset = ticks; 4132 sc->sc_lastshortcal = ticks; 4133 sc->sc_resetcal = 0; 4134 sc->sc_doresetcal = AH_TRUE; 4135 } 4136 } 4137 4138 /* Only call if we're doing a short/long cal, not for ANI calibration */ 4139 if (shortCal || longCal) { 4140 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 4141 if (longCal) { 4142 /* 4143 * Calibrate noise floor data again in case of change. 
4144 */ 4145 ath_hal_process_noisefloor(ah); 4146 } 4147 } else { 4148 DPRINTF(sc, ATH_DEBUG_ANY, 4149 "%s: calibration of channel %u failed\n", 4150 __func__, sc->sc_curchan->ic_freq); 4151 sc->sc_stats.ast_per_calfail++; 4152 } 4153 if (shortCal) 4154 sc->sc_lastshortcal = ticks; 4155 } 4156 if (!isCalDone) { 4157restart: 4158 /* 4159 * Use a shorter interval to potentially collect multiple 4160 * data samples required to complete calibration. Once 4161 * we're told the work is done we drop back to a longer 4162 * interval between requests. We're more aggressive doing 4163 * work when operating as an AP to improve operation right 4164 * after startup. 4165 */ 4166 sc->sc_lastshortcal = ticks; 4167 nextcal = ath_shortcalinterval*hz/1000; 4168 if (sc->sc_opmode != HAL_M_HOSTAP) 4169 nextcal *= 10; 4170 sc->sc_doresetcal = AH_TRUE; 4171 } else { 4172 /* nextcal should be the shortest time for next event */ 4173 nextcal = ath_longcalinterval*hz; 4174 if (sc->sc_lastcalreset == 0) 4175 sc->sc_lastcalreset = sc->sc_lastlongcal; 4176 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 4177 sc->sc_resetcal = 1; /* setup reset next trip */ 4178 sc->sc_doresetcal = AH_FALSE; 4179 } 4180 /* ANI calibration may occur more often than short/long/resetcal */ 4181 if (ath_anicalinterval > 0) 4182 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 4183 4184 if (nextcal != 0) { 4185 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 4186 __func__, nextcal, isCalDone ? "" : "!"); 4187 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 4188 } else { 4189 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 4190 __func__); 4191 /* NB: don't rearm timer */ 4192 } 4193} 4194 4195static void 4196ath_scan_start(struct ieee80211com *ic) 4197{ 4198 struct ifnet *ifp = ic->ic_ifp; 4199 struct ath_softc *sc = ifp->if_softc; 4200 struct ath_hal *ah = sc->sc_ah; 4201 u_int32_t rfilt; 4202 4203 /* XXX calibration timer? */ 4204 4205 ATH_LOCK(sc); 4206 sc->sc_scanning = 1; 4207 sc->sc_syncbeacon = 0; 4208 rfilt = ath_calcrxfilter(sc); 4209 ATH_UNLOCK(sc); 4210 4211 ATH_PCU_LOCK(sc); 4212 ath_hal_setrxfilter(ah, rfilt); 4213 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); 4214 ATH_PCU_UNLOCK(sc); 4215 4216 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 4217 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); 4218} 4219 4220static void 4221ath_scan_end(struct ieee80211com *ic) 4222{ 4223 struct ifnet *ifp = ic->ic_ifp; 4224 struct ath_softc *sc = ifp->if_softc; 4225 struct ath_hal *ah = sc->sc_ah; 4226 u_int32_t rfilt; 4227 4228 ATH_LOCK(sc); 4229 sc->sc_scanning = 0; 4230 rfilt = ath_calcrxfilter(sc); 4231 ATH_UNLOCK(sc); 4232 4233 ATH_PCU_LOCK(sc); 4234 ath_hal_setrxfilter(ah, rfilt); 4235 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4236 4237 ath_hal_process_noisefloor(ah); 4238 ATH_PCU_UNLOCK(sc); 4239 4240 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4241 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 4242 sc->sc_curaid); 4243} 4244 4245#ifdef ATH_ENABLE_11N 4246/* 4247 * For now, just do a channel change. 4248 * 4249 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 4250 * control state and resetting the hardware without dropping frames out 4251 * of the queue. 
4252 * 4253 * The unfortunate trouble here is making absolutely sure that the 4254 * channel width change has propagated enough so the hardware 4255 * absolutely isn't handed bogus frames for it's current operating 4256 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 4257 * does occur in parallel, we need to make certain we've blocked 4258 * any further ongoing TX (and RX, that can cause raw TX) 4259 * before we do this. 4260 */ 4261static void 4262ath_update_chw(struct ieee80211com *ic) 4263{ 4264 struct ifnet *ifp = ic->ic_ifp; 4265 struct ath_softc *sc = ifp->if_softc; 4266 4267 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 4268 ath_set_channel(ic); 4269} 4270#endif /* ATH_ENABLE_11N */ 4271 4272static void 4273ath_set_channel(struct ieee80211com *ic) 4274{ 4275 struct ifnet *ifp = ic->ic_ifp; 4276 struct ath_softc *sc = ifp->if_softc; 4277 4278 (void) ath_chan_set(sc, ic->ic_curchan); 4279 /* 4280 * If we are returning to our bss channel then mark state 4281 * so the next recv'd beacon's tsf will be used to sync the 4282 * beacon timers. Note that since we only hear beacons in 4283 * sta/ibss mode this has no effect in other operating modes. 4284 */ 4285 ATH_LOCK(sc); 4286 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 4287 sc->sc_syncbeacon = 1; 4288 ATH_UNLOCK(sc); 4289} 4290 4291/* 4292 * Walk the vap list and check if there any vap's in RUN state. 4293 */ 4294static int 4295ath_isanyrunningvaps(struct ieee80211vap *this) 4296{ 4297 struct ieee80211com *ic = this->iv_ic; 4298 struct ieee80211vap *vap; 4299 4300 IEEE80211_LOCK_ASSERT(ic); 4301 4302 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 4303 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 4304 return 1; 4305 } 4306 return 0; 4307} 4308 4309static int 4310ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4311{ 4312 struct ieee80211com *ic = vap->iv_ic; 4313 struct ath_softc *sc = ic->ic_ifp->if_softc; 4314 struct ath_vap *avp = ATH_VAP(vap); 4315 struct ath_hal *ah = sc->sc_ah; 4316 struct ieee80211_node *ni = NULL; 4317 int i, error, stamode; 4318 u_int32_t rfilt; 4319 int csa_run_transition = 0; 4320 static const HAL_LED_STATE leds[] = { 4321 HAL_LED_INIT, /* IEEE80211_S_INIT */ 4322 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 4323 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 4324 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 4325 HAL_LED_RUN, /* IEEE80211_S_CAC */ 4326 HAL_LED_RUN, /* IEEE80211_S_RUN */ 4327 HAL_LED_RUN, /* IEEE80211_S_CSA */ 4328 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 4329 }; 4330 4331 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 4332 ieee80211_state_name[vap->iv_state], 4333 ieee80211_state_name[nstate]); 4334 4335 /* 4336 * net80211 _should_ have the comlock asserted at this point. 4337 * There are some comments around the calls to vap->iv_newstate 4338 * which indicate that it (newstate) may end up dropping the 4339 * lock. This and the subsequent lock assert check after newstate 4340 * are an attempt to catch these and figure out how/why. 4341 */ 4342 IEEE80211_LOCK_ASSERT(ic); 4343 4344 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 4345 csa_run_transition = 1; 4346 4347 callout_drain(&sc->sc_cal_ch); 4348 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 4349 4350 if (nstate == IEEE80211_S_SCAN) { 4351 /* 4352 * Scanning: turn off beacon miss and don't beacon. 4353 * Mark beacon state so when we reach RUN state we'll 4354 * [re]setup beacons. Unblock the task q thread so 4355 * deferred interrupt processing is done. 
4356 */ 4357 ath_hal_intrset(ah, 4358 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 4359 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 4360 sc->sc_beacons = 0; 4361 taskqueue_unblock(sc->sc_tq); 4362 } 4363 4364 ni = ieee80211_ref_node(vap->iv_bss); 4365 rfilt = ath_calcrxfilter(sc); 4366 stamode = (vap->iv_opmode == IEEE80211_M_STA || 4367 vap->iv_opmode == IEEE80211_M_AHDEMO || 4368 vap->iv_opmode == IEEE80211_M_IBSS); 4369 if (stamode && nstate == IEEE80211_S_RUN) { 4370 sc->sc_curaid = ni->ni_associd; 4371 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 4372 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 4373 } 4374 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 4375 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 4376 ath_hal_setrxfilter(ah, rfilt); 4377 4378 /* XXX is this to restore keycache on resume? */ 4379 if (vap->iv_opmode != IEEE80211_M_STA && 4380 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 4381 for (i = 0; i < IEEE80211_WEP_NKID; i++) 4382 if (ath_hal_keyisvalid(ah, i)) 4383 ath_hal_keysetmac(ah, i, ni->ni_bssid); 4384 } 4385 4386 /* 4387 * Invoke the parent method to do net80211 work. 4388 */ 4389 error = avp->av_newstate(vap, nstate, arg); 4390 if (error != 0) 4391 goto bad; 4392 4393 /* 4394 * See above: ensure av_newstate() doesn't drop the lock 4395 * on us. 4396 */ 4397 IEEE80211_LOCK_ASSERT(ic); 4398 4399 if (nstate == IEEE80211_S_RUN) { 4400 /* NB: collect bss node again, it may have changed */ 4401 ieee80211_free_node(ni); 4402 ni = ieee80211_ref_node(vap->iv_bss); 4403 4404 DPRINTF(sc, ATH_DEBUG_STATE, 4405 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 4406 "capinfo 0x%04x chan %d\n", __func__, 4407 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 4408 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 4409 4410 switch (vap->iv_opmode) { 4411#ifdef IEEE80211_SUPPORT_TDMA 4412 case IEEE80211_M_AHDEMO: 4413 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 4414 break; 4415 /* fall thru... */ 4416#endif 4417 case IEEE80211_M_HOSTAP: 4418 case IEEE80211_M_IBSS: 4419 case IEEE80211_M_MBSS: 4420 /* 4421 * Allocate and setup the beacon frame. 4422 * 4423 * Stop any previous beacon DMA. This may be 4424 * necessary, for example, when an ibss merge 4425 * causes reconfiguration; there will be a state 4426 * transition from RUN->RUN that means we may 4427 * be called with beacon transmission active. 4428 */ 4429 ath_hal_stoptxdma(ah, sc->sc_bhalq); 4430 4431 error = ath_beacon_alloc(sc, ni); 4432 if (error != 0) 4433 goto bad; 4434 /* 4435 * If joining an adhoc network defer beacon timer 4436 * configuration to the next beacon frame so we 4437 * have a current TSF to use. Otherwise we're 4438 * starting an ibss/bss so there's no need to delay; 4439 * if this is the first vap moving to RUN state, then 4440 * beacon state needs to be [re]configured. 4441 */ 4442 if (vap->iv_opmode == IEEE80211_M_IBSS && 4443 ni->ni_tstamp.tsf != 0) { 4444 sc->sc_syncbeacon = 1; 4445 } else if (!sc->sc_beacons) { 4446#ifdef IEEE80211_SUPPORT_TDMA 4447 if (vap->iv_caps & IEEE80211_C_TDMA) 4448 ath_tdma_config(sc, vap); 4449 else 4450#endif 4451 ath_beacon_config(sc, vap); 4452 sc->sc_beacons = 1; 4453 } 4454 break; 4455 case IEEE80211_M_STA: 4456 /* 4457 * Defer beacon timer configuration to the next 4458 * beacon frame so we have a current TSF to use 4459 * (any TSF collected when scanning is likely old). 
* However if it's due to a CSA -> RUN transition,
4461 * force a beacon update so we pick up a lack of
4462 * beacons from an AP in CAC and thus force a
4463 * scan.
4464 */
4465 sc->sc_syncbeacon = 1;
4466 if (csa_run_transition)
4467 ath_beacon_config(sc, vap);
4468 break;
4469 case IEEE80211_M_MONITOR:
4470 /*
4471 * Monitor mode vaps have only INIT->RUN and RUN->RUN
4472 * transitions so we must re-enable interrupts here to
4473 * handle the case of a single monitor mode vap.
4474 */
4475 ath_hal_intrset(ah, sc->sc_imask);
4476 break;
4477 case IEEE80211_M_WDS:
4478 break;
4479 default:
4480 break;
4481 }
4482 /*
4483 * Let the hal process statistics collected during a
4484 * scan so it can provide calibrated noise floor data.
4485 */
4486 ath_hal_process_noisefloor(ah);
4487 /*
4488 * Reset rssi stats; maybe not the best place...
4489 */
4490 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
4491 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
4492 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
4493 /*
4494 * Finally, start any timers and the task q thread
4495 * (in case we didn't go through SCAN state).
4496 */
4497 if (ath_longcalinterval != 0) {
4498 /* start periodic recalibration timer */
4499 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4500 } else {
4501 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4502 "%s: calibration disabled\n", __func__);
4503 }
4504 taskqueue_unblock(sc->sc_tq);
4505 } else if (nstate == IEEE80211_S_INIT) {
4506 /*
4507 * If there are no vaps left in RUN state then
4508 * shutdown host/driver operation:
4509 * o disable interrupts
4510 * o disable the task queue thread
4511 * o mark beacon processing as stopped
4512 */
4513 if (!ath_isanyrunningvaps(vap)) {
4514 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4515 /* disable interrupts */
4516 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
4517 taskqueue_block(sc->sc_tq);
4518 sc->sc_beacons = 0;
4519 }
4520#ifdef IEEE80211_SUPPORT_TDMA
4521 ath_hal_setcca(ah, AH_TRUE);
4522#endif
4523 }
4524bad:
4525 ieee80211_free_node(ni);
4526 return error;
4527}
4528
4529/*
4530 * Allocate a key cache slot for the station so we can
4531 * set up a mapping from key index to node. The key cache
4532 * slot is needed for managing antenna state and for
4533 * compression when stations do not use crypto. We do
4534 * it unilaterally here; if crypto is employed this slot
4535 * will be reassigned.
4536 */
4537static void
4538ath_setup_stationkey(struct ieee80211_node *ni)
4539{
4540 struct ieee80211vap *vap = ni->ni_vap;
4541 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4542 ieee80211_keyix keyix, rxkeyix;
4543
4544 /* XXX should take a locked ref to vap->iv_bss */
4545 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
4546 /*
4547 * Key cache is full; we'll fall back to doing
4548 * the more expensive lookup in software. Note
4549 * this also means no h/w compression.
4550 */
4551 /* XXX msg+statistic */
4552 } else {
4553 /* XXX locking? */
4554 ni->ni_ucastkey.wk_keyix = keyix;
4555 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
4556 /* NB: must mark device key to get called back on delete */
4557 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
4558 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
4559 /* NB: this will create a pass-thru key entry */
4560 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
4561 }
4562}
4563
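/*
 * Illustrative sketch (not driver code): why the per-station key
 * cache slot matters.  Assuming the softc keeps a key index -> node
 * map (the sc_keyixmap member declared in if_athvar.h), the RX path
 * can recover the sending node straight from the descriptor's key
 * index rather than hashing on the MAC address:
 */
#if 0
	if (rs->rs_keyix != HAL_RXKEYIX_INVALID &&
	    (ni = sc->sc_keyixmap[rs->rs_keyix]) != NULL) {
		/* fast path: node found via the key cache slot */
	}
#endif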
4564/*
4565 * Setup driver-specific state for a newly associated node.
4566 * Note that we're also called on re-association; the isnew
4567 * param tells us whether this is the first time or not.
4568 */
4569static void
4570ath_newassoc(struct ieee80211_node *ni, int isnew)
4571{
4572 struct ath_node *an = ATH_NODE(ni);
4573 struct ieee80211vap *vap = ni->ni_vap;
4574 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4575 const struct ieee80211_txparam *tp = ni->ni_txparms;
4576
4577 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
4578 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
4579
4580 ath_rate_newassoc(sc, an, isnew);
4581 if (isnew &&
4582 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
4583 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
4584 ath_setup_stationkey(ni);
4585}
4586
4587static int
4588ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
4589 int nchans, struct ieee80211_channel chans[])
4590{
4591 struct ath_softc *sc = ic->ic_ifp->if_softc;
4592 struct ath_hal *ah = sc->sc_ah;
4593 HAL_STATUS status;
4594
4595 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4596 "%s: rd %u cc %u location %c%s\n",
4597 __func__, reg->regdomain, reg->country, reg->location,
4598 reg->ecm ? " ecm" : "");
4599
4600 status = ath_hal_set_channels(ah, chans, nchans,
4601 reg->country, reg->regdomain);
4602 if (status != HAL_OK) {
4603 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
4604 __func__, status);
4605 return EINVAL; /* XXX */
4606 }
4607
4608 return 0;
4609}
4610
4611static void
4612ath_getradiocaps(struct ieee80211com *ic,
4613 int maxchans, int *nchans, struct ieee80211_channel chans[])
4614{
4615 struct ath_softc *sc = ic->ic_ifp->if_softc;
4616 struct ath_hal *ah = sc->sc_ah;
4617
4618 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
4619 __func__, SKU_DEBUG, CTRY_DEFAULT);
4620
4621 /* XXX check return */
4622 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
4623 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
4624}
4625
4626
4627static int
4628ath_getchannels(struct ath_softc *sc)
4629{
4630 struct ifnet *ifp = sc->sc_ifp;
4631 struct ieee80211com *ic = ifp->if_l2com;
4632 struct ath_hal *ah = sc->sc_ah;
4633 HAL_STATUS status;
4634
4635 /*
4636 * Collect channel set based on EEPROM contents.
4637 */
4638 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
4639 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
4640 if (status != HAL_OK) {
4641 if_printf(ifp, "%s: unable to collect channel list from hal, "
4642 "status %d\n", __func__, status);
4643 return EINVAL;
4644 }
4645 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
4646 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
4647 /* XXX map Atheros sku's to net80211 SKU's */
4648 /* XXX net80211 types too small */
4649 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
4650 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
4651 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
4652 ic->ic_regdomain.isocc[1] = ' ';
4653
4654 ic->ic_regdomain.ecm = 1;
4655 ic->ic_regdomain.location = 'I';
4656
4657 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4658 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
4659 __func__, sc->sc_eerd, sc->sc_eecc,
4660 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
4661 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
4662 return 0;
4663}
4664
4665static int
4666ath_rate_setup(struct ath_softc *sc, u_int mode)
4667{
4668 struct ath_hal *ah = sc->sc_ah;
4669 const HAL_RATE_TABLE *rt;
4670
4671 switch (mode) {
4672 case IEEE80211_MODE_11A:
4673 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
4674 break;
4675 case IEEE80211_MODE_HALF:
4676 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
4677 break;
4678 case IEEE80211_MODE_QUARTER:
4679 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
4680 break;
4681 case IEEE80211_MODE_11B:
4682 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
4683 break;
4684 case IEEE80211_MODE_11G:
4685 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
4686 break;
4687 case IEEE80211_MODE_TURBO_A:
4688 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
4689 break;
4690 case IEEE80211_MODE_TURBO_G:
4691 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
4692 break;
4693 case IEEE80211_MODE_STURBO_A:
4694 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
4695 break;
4696 case IEEE80211_MODE_11NA:
4697 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
4698 break;
4699 case IEEE80211_MODE_11NG:
4700 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
4701 break;
4702 default:
4703 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
4704 __func__, mode);
4705 return 0;
4706 }
4707 sc->sc_rates[mode] = rt;
4708 return (rt != NULL);
4709}
4710
4711static void
4712ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
4713{
4714#define N(a) (sizeof(a)/sizeof(a[0]))
4715 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
4716 static const struct {
4717 u_int rate; /* tx/rx 802.11 rate */
4718 u_int16_t timeOn; /* LED on time (ms) */
4719 u_int16_t timeOff; /* LED off time (ms) */
4720 } blinkrates[] = {
4721 { 108, 40, 10 },
4722 { 96, 44, 11 },
4723 { 72, 50, 13 },
4724 { 48, 57, 14 },
4725 { 36, 67, 16 },
4726 { 24, 80, 20 },
4727 { 22, 100, 25 },
4728 { 18, 133, 34 },
4729 { 12, 160, 40 },
4730 { 10, 200, 50 },
4731 { 6, 240, 58 },
4732 { 4, 267, 66 },
4733 { 2, 400, 100 },
4734 { 0, 500, 130 },
4735 /* XXX half/quarter rates */
4736 };
4737 const HAL_RATE_TABLE *rt;
4738 int i, j;
4739
4740 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
4741 rt = sc->sc_rates[mode];
4742 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
4743 for (i = 0; i < rt->rateCount; i++) {
4744 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
4745 if (rt->info[i].phy != IEEE80211_T_HT)
4746 sc->sc_rixmap[ieeerate] = i;
4747 else
4748 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
4749 }
4750 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
4751 for (i = 0; i < N(sc->sc_hwmap); i++) {
4752 if (i >= rt->rateCount) {
4753 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
4754 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
4755 continue;
4756 }
4757 sc->sc_hwmap[i].ieeerate =
4758 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
4759 if (rt->info[i].phy == IEEE80211_T_HT)
4760 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
4761 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
4762 if (rt->info[i].shortPreamble ||
4763 rt->info[i].phy == IEEE80211_T_OFDM)
4764 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4765 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
4766 for (j = 0; j < N(blinkrates)-1; j++)
4767 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
4768 break;
4769 /* NB: this uses the last entry if the rate isn't found */
4770 /* XXX beware of overflow */
4771 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
4772 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
4773 }
4774 sc->sc_currates = rt;
4775 sc->sc_curmode = mode;
4776 /*
4777 * All protection frames are transmitted at 2Mb/s for
4778 * 11g, otherwise at 1Mb/s.
4779 */
4780 if (mode == IEEE80211_MODE_11G)
4781 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
4782 else
4783 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
4784 /* NB: caller is responsible for resetting rate control state */
4785#undef N
4786}
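/*
 * Worked example (illustrative only): with the default hz = 1000, the
 * 54Mb/s blinkrates entry above ({ 108, 40, 10 }) programs
 *
 *	ledon  = (40 * hz) / 1000 = 40 ticks
 *	ledoff = (10 * hz) / 1000 = 10 ticks
 *
 * and the protection rate lookup passes the dot11 rate code in
 * 500kb/s units: 2*2 = 4 (2Mb/s) for 11g, 2*1 = 2 (1Mb/s) otherwise.
 */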
4787
4788static void
4789ath_watchdog(void *arg)
4790{
4791 struct ath_softc *sc = arg;
4792 int do_reset = 0;
4793
4794 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
4795 struct ifnet *ifp = sc->sc_ifp;
4796 uint32_t hangs;
4797
4798 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
4799 hangs != 0) {
4800 if_printf(ifp, "%s hang detected (0x%x)\n",
4801 hangs & 0xff ? "bb" : "mac", hangs);
4802 } else
4803 if_printf(ifp, "device timeout\n");
4804 do_reset = 1;
4805 ifp->if_oerrors++;
4806 sc->sc_stats.ast_watchdog++;
4807 }
4808
4809 /*
4810 * We can't hold the lock across the ath_reset() call.
4811 *
4812 * And since this routine can't hold a lock and sleep,
4813 * do the reset deferred.
4814 */
4815 if (do_reset) {
4816 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
4817 }
4818
4819 callout_schedule(&sc->sc_wd_ch, hz);
4820}
4821
4822#ifdef ATH_DIAGAPI
4823/*
4824 * Diagnostic interface to the HAL. This is used by various
4825 * tools to do things like retrieve register contents for
4826 * debugging. The mechanism is intentionally opaque so that
4827 * it can change frequently w/o concern for compatibility.
4828 */
4829static int
4830ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
4831{
4832 struct ath_hal *ah = sc->sc_ah;
4833 u_int id = ad->ad_id & ATH_DIAG_ID;
4834 void *indata = NULL;
4835 void *outdata = NULL;
4836 u_int32_t insize = ad->ad_in_size;
4837 u_int32_t outsize = ad->ad_out_size;
4838 int error = 0;
4839
4840 if (ad->ad_id & ATH_DIAG_IN) {
4841 /*
4842 * Copy in data.
4843 */
4844 indata = malloc(insize, M_TEMP, M_NOWAIT);
4845 if (indata == NULL) {
4846 error = ENOMEM;
4847 goto bad;
4848 }
4849 error = copyin(ad->ad_in_data, indata, insize);
4850 if (error)
4851 goto bad;
4852 }
4853 if (ad->ad_id & ATH_DIAG_DYN) {
4854 /*
4855 * Allocate a buffer for the results (otherwise the HAL
4856 * returns a pointer to a buffer where we can read the
4857 * results). Note that we depend on the HAL leaving this
4858 * pointer for us to use below in reclaiming the buffer;
4859 * may want to be more defensive.
4860 */
4861 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4862 if (outdata == NULL) {
4863 error = ENOMEM;
4864 goto bad;
4865 }
4866 }
4867 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
4868 if (outsize < ad->ad_out_size)
4869 ad->ad_out_size = outsize;
4870 if (outdata != NULL)
4871 error = copyout(outdata, ad->ad_out_data,
4872 ad->ad_out_size);
4873 } else {
4874 error = EINVAL;
4875 }
4876bad:
4877 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
4878 free(indata, M_TEMP);
4879 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
4880 free(outdata, M_TEMP);
4881 return error;
4882}
4883#endif /* ATH_DIAGAPI */
4884
4885static int
4886ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4887{
4888#define IS_RUNNING(ifp) \
4889 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4890 struct ath_softc *sc = ifp->if_softc;
4891 struct ieee80211com *ic = ifp->if_l2com;
4892 struct ifreq *ifr = (struct ifreq *)data;
4893 const HAL_RATE_TABLE *rt;
4894 int error = 0;
4895
4896 switch (cmd) {
4897 case SIOCSIFFLAGS:
4898 ATH_LOCK(sc);
4899 if (IS_RUNNING(ifp)) {
4900 /*
4901 * To avoid rescanning another access point,
4902 * do not call ath_init() here. Instead,
4903 * only reflect promisc mode settings.
4904 */
4905 ath_mode_init(sc);
4906 } else if (ifp->if_flags & IFF_UP) {
4907 /*
4908 * Beware of being called during attach/detach
4909 * to reset promiscuous mode. In that case we
4910 * will still be marked UP but not RUNNING.
4911 * However trying to re-init the interface
4912 * is the wrong thing to do as we've already
4913 * torn down much of our state. There's
4914 * probably a better way to deal with this.
4915 */
4916 if (!sc->sc_invalid)
4917 ath_init(sc); /* XXX lose error */
4918 } else {
4919 ath_stop_locked(ifp);
4920#ifdef notyet
4921 /* XXX must wakeup in places like ath_vap_delete */
4922 if (!sc->sc_invalid)
4923 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
4924#endif
4925 }
4926 ATH_UNLOCK(sc);
4927 break;
4928 case SIOCGIFMEDIA:
4929 case SIOCSIFMEDIA:
4930 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4931 break;
4932 case SIOCGATHSTATS:
4933 /* NB: embed these numbers to get a consistent view */
4934 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
4935 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
4936 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
4937 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
4938#ifdef IEEE80211_SUPPORT_TDMA
4939 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
4940 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
4941#endif
4942 rt = sc->sc_currates;
4943 sc->sc_stats.ast_tx_rate =
4944 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
4945 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
4946 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
4947 return copyout(&sc->sc_stats,
4948 ifr->ifr_data, sizeof (sc->sc_stats));
4949 case SIOCGATHAGSTATS:
4950 return copyout(&sc->sc_aggr_stats,
4951 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
4952 case SIOCZATHSTATS:
4953 error = priv_check(curthread, PRIV_DRIVER);
4954 if (error == 0) {
4955 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
4956 memset(&sc->sc_aggr_stats, 0,
4957 sizeof(sc->sc_aggr_stats));
4958 memset(&sc->sc_intr_stats, 0,
4959 sizeof(sc->sc_intr_stats));
4960 }
4961 break;
4962#ifdef ATH_DIAGAPI
4963 case SIOCGATHDIAG:
4964 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
4965 break;
4966 case SIOCGATHPHYERR:
4967 error = ath_ioctl_phyerr(sc, (struct ath_diag *) ifr);
4968 break;
4969#endif
4970 case SIOCGIFADDR:
4971 error = ether_ioctl(ifp, cmd, data);
4972 break;
4973 default:
4974 error = EINVAL;
4975 break;
4976 }
4977 return error;
4978#undef IS_RUNNING
4979}
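/*
 * Illustrative userland sketch (not part of the driver): how a
 * statistics tool can drive the SIOCGATHSTATS case above.  "ath0" is
 * an example interface name; error handling is trimmed.
 */
#if 0
	struct ifreq ifr;
	struct ath_stats stats;	/* see <dev/ath/if_athioctl.h> */
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "ath0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t) &stats;
	if (ioctl(s, SIOCGATHSTATS, &ifr) == 0)
		printf("tx %u rx %u\n", stats.ast_tx_packets,
		    stats.ast_rx_packets);
#endif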
4980
4981/*
4982 * Announce various information on device/driver attach.
4983 */
4984static void
4985ath_announce(struct ath_softc *sc)
4986{
4987 struct ifnet *ifp = sc->sc_ifp;
4988 struct ath_hal *ah = sc->sc_ah;
4989
4990 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
4991 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
4992 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
4993 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
4994 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
4995 if (bootverbose) {
4996 int i;
4997 for (i = 0; i <= WME_AC_VO; i++) {
4998 struct ath_txq *txq = sc->sc_ac2q[i];
4999 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5000 txq->axq_qnum, ieee80211_wme_acnames[i]);
5001 }
5002 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5003 sc->sc_cabq->axq_qnum);
5004 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5005 }
5006 if (ath_rxbuf != ATH_RXBUF)
5007 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5008 if (ath_txbuf != ATH_TXBUF)
5009 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5010 if (sc->sc_mcastkey && bootverbose)
5011 if_printf(ifp, "using multicast key search\n");
5012}
5013
5014static void
5015ath_dfs_tasklet(void *p, int npending)
5016{
5017 struct ath_softc *sc = (struct ath_softc *) p;
5018 struct ifnet *ifp = sc->sc_ifp;
5019 struct ieee80211com *ic = ifp->if_l2com;
5020
5021 /*
5022 * If previous processing has found a radar event,
5023 * signal this to the net80211 layer to begin DFS
5024 * processing.
5025 */
5026 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
5027 /* DFS event found, initiate channel change */
5028 /*
5029 * XXX doesn't currently tell us whether the event
5030 * XXX was found in the primary or extension
5031 * XXX channel!
5032 */
5033 IEEE80211_LOCK(ic);
5034 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
5035 IEEE80211_UNLOCK(ic);
5036 }
5037}
5038
5039MODULE_VERSION(if_ath, 1);
5040MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
5041#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
5042MODULE_DEPEND(if_ath, alq, 1, 1, 1);
5043#endif
5044
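/*
 * Usage note (illustrative only): the module declared above is
 * loadable with kldload(8); the rx/tx buffer pool sizes reported by
 * ath_announce() follow the loader tunables declared earlier in this
 * file (assuming the usual hw.ath.* names), e.g. in
 * /boot/loader.conf:
 *
 *	if_ath_load="YES"
 *	hw.ath.rxbuf=64
 *	hw.ath.txbuf=512
 */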