if_ath.c revision 330897
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/ath/if_ath.c 330897 2018-03-14 03:19:51Z eadler $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h>            /* for mp_ncpus */

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>           /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_tx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_ath_btcoex.h>
#include <dev/ath/if_ath_btcoex_mci.h>
#include <dev/ath/if_ath_spectral.h>
#include <dev/ath/if_ath_lna_div.h>
#include <dev/ath/if_athdfs.h>
#include <dev/ath/if_ath_ioctl.h>
#include <dev/ath/if_ath_descdma.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#define ATH_SW_PSQ

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);
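
/*
 * Illustrative numbers: with the common 100 TU beacon interval and
 * four staggered-beacon vaps, a beacon goes out roughly every 25 TU,
 * and that gap is all the time the CAB queue has to drain after each
 * beacon.  Raising ATH_BCBUF shrinks the window further, which is why
 * 4 is suggested as a practical maximum above.
 */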

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
                    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
                    const uint8_t [IEEE80211_ADDR_LEN],
                    const uint8_t [IEEE80211_ADDR_LEN]);
static void     ath_vap_delete(struct ieee80211vap *);
static int      ath_init(struct ath_softc *);
static void     ath_stop(struct ath_softc *);
static int      ath_reset_vap(struct ieee80211vap *, u_long);
static int      ath_transmit(struct ieee80211com *, struct mbuf *);
static int      ath_media_change(struct ifnet *);
static void     ath_watchdog(void *);
static void     ath_parent(struct ieee80211com *);
static void     ath_fatal_proc(void *, int);
static void     ath_bmiss_vap(struct ieee80211vap *);
static void     ath_bmiss_proc(void *, int);
static void     ath_key_update_begin(struct ieee80211vap *);
static void     ath_key_update_end(struct ieee80211vap *);
static void     ath_update_mcast_hw(struct ath_softc *);
static void     ath_update_mcast(struct ieee80211com *);
static void     ath_update_promisc(struct ieee80211com *);
static void     ath_updateslot(struct ieee80211com *);
static void     ath_bstuck_proc(void *, int);
static void     ath_reset_proc(void *, int);
static int      ath_desc_alloc(struct ath_softc *);
static void     ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
                    const uint8_t [IEEE80211_ADDR_LEN]);
static void     ath_node_cleanup(struct ieee80211_node *);
static void     ath_node_free(struct ieee80211_node *);
static void     ath_node_getsignal(const struct ieee80211_node *,
                    int8_t *, int8_t *);
static void     ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int      ath_tx_setup(struct ath_softc *, int, int);
static void     ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void     ath_tx_cleanup(struct ath_softc *);
static int      ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
                    int dosched);
static void     ath_tx_proc_q0(void *, int);
static void     ath_tx_proc_q0123(void *, int);
static void     ath_tx_proc(void *, int);
static void     ath_txq_sched_tasklet(void *, int);
static int      ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void     ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void     ath_scan_start(struct ieee80211com *);
static void     ath_scan_end(struct ieee80211com *);
static void     ath_set_channel(struct ieee80211com *);
#ifdef ATH_ENABLE_11N
static void     ath_update_chw(struct ieee80211com *);
#endif  /* ATH_ENABLE_11N */
static void     ath_calibrate(void *);
static int      ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void     ath_setup_stationkey(struct ieee80211_node *);
static void     ath_newassoc(struct ieee80211_node *, int);
static int      ath_setregdomain(struct ieee80211com *,
                    struct ieee80211_regdomain *, int,
                    struct ieee80211_channel []);
static void     ath_getradiocaps(struct ieee80211com *, int, int *,
                    struct ieee80211_channel []);
static int      ath_getchannels(struct ath_softc *);

static int      ath_rate_setup(struct ath_softc *, u_int mode);
static void     ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void     ath_announce(struct ath_softc *);

static void     ath_dfs_tasklet(void *, int);
static void     ath_node_powersave(struct ieee80211_node *, int);
static int      ath_node_set_tim(struct ieee80211_node *, int);
static void     ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;            /* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
    0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;          /* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
    0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;        /* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
    0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;            /* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;              /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
    0, "rx buffers allocated");
int ath_txbuf = ATH_TXBUF;              /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
    0, "tx buffers allocated");
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;    /* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
    0, "tx (mgmt) buffers allocated");

int ath_bstuck_threshold = 4;           /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

        /*
         * Special case certain configurations.  Note the
         * CAB queue is handled by these specially so don't
         * include them when checking the txq setup mask.
         */
        switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
        case 0x01:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
                break;
        case 0x0f:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
                break;
        default:
                TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
                break;
        }
}
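
/*
 * For example: with only hardware data queue 0 set up, sc_txqsetup
 * reduces to 0x01 once the CAB queue bit is masked off, so the
 * single-queue ath_tx_proc_q0 completion handler is used; queues 0-3
 * give 0x0f and ath_tx_proc_q0123; any other layout falls back to the
 * generic ath_tx_proc.
 */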

/*
 * Set the target power mode.
 *
 * If this is called during a point in time where
 * the hardware is being programmed elsewhere, it will
 * simply store it away and update it when all current
 * uses of the hardware are completed.
 */
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
{
        ATH_LOCK_ASSERT(sc);

        sc->sc_target_powerstate = power_state;

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_powersave_refcnt);

        if (sc->sc_powersave_refcnt == 0 &&
            power_state != sc->sc_cur_powerstate) {
                sc->sc_cur_powerstate = power_state;
                ath_hal_setpower(sc->sc_ah, power_state);

                /*
                 * If the NIC is force-awake, then set the
                 * self-gen frame state appropriately.
                 *
                 * If the NIC is in network sleep or full-sleep,
                 * we let the above call leave the self-gen
                 * state as "sleep".
                 */
                if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
                    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                        ath_hal_setselfgenpower(sc->sc_ah,
                            sc->sc_target_selfgen_state);
                }
        }
}

/*
 * Set the current self-generated frames state.
 *
 * This is separate from the target power mode.  The chip may be
 * awake but the desired state is "sleep", so frames sent to the
 * destination have PWRMGT=1 in the 802.11 header.  The NIC also
 * needs to know to set PWRMGT=1 in self-generated frames.
 */
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
{

        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_target_selfgen_state);

        sc->sc_target_selfgen_state = power_state;

        /*
         * If the NIC is force-awake, then set the power state.
         * Network-sleep and full-sleep will already transition it to
         * mark self-gen frames as sleeping - and we can't
         * guarantee the NIC is awake to program the self-gen frame
         * setting anyway.
         */
        if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
                ath_hal_setselfgenpower(sc->sc_ah, power_state);
        }
}

/*
 * Set the hardware power mode and take a reference.
 *
 * This doesn't update the target power mode in the driver;
 * it just updates the hardware power state.
 *
 * XXX it should only ever force the hardware awake; it should
 * never be called to set it asleep.
 */
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
{
        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
            __func__,
            file,
            line,
            power_state,
            sc->sc_powersave_refcnt);

        sc->sc_powersave_refcnt++;

        if (power_state != sc->sc_cur_powerstate) {
                ath_hal_setpower(sc->sc_ah, power_state);
                sc->sc_cur_powerstate = power_state;

                /*
                 * Adjust the self-gen powerstate if appropriate.
                 */
                if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
                    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                        ath_hal_setselfgenpower(sc->sc_ah,
                            sc->sc_target_selfgen_state);
                }

        }
}

/*
 * Restore the power save mode to what it once was.
 *
 * This will decrement the reference counter and once it hits
 * zero, it'll restore the powersave state.
 */
void
_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
{

        ATH_LOCK_ASSERT(sc);

        DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
            __func__,
            file,
            line,
            sc->sc_powersave_refcnt,
            sc->sc_target_powerstate);

        if (sc->sc_powersave_refcnt == 0)
                device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
        else
                sc->sc_powersave_refcnt--;

        if (sc->sc_powersave_refcnt == 0 &&
            sc->sc_target_powerstate != sc->sc_cur_powerstate) {
                sc->sc_cur_powerstate = sc->sc_target_powerstate;
                ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
        }

        /*
         * Adjust the self-gen powerstate if appropriate.
         */
        if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
            sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
                ath_hal_setselfgenpower(sc->sc_ah,
                    sc->sc_target_selfgen_state);
        }

}
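
/*
 * NB: the usual calling pattern for the four routines above is to
 * bracket hardware access with a wake/restore pair while holding the
 * ATH_LOCK, as the detach, vap delete and interrupt paths below do:
 *
 *      ATH_LOCK(sc);
 *      ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *      ATH_UNLOCK(sc);
 *      ... program the hardware ...
 *      ATH_LOCK(sc);
 *      ath_power_restore_power_state(sc);
 *      ATH_UNLOCK(sc);
 *
 * The reference taken by the set call keeps the chip awake until the
 * matching restore drops sc_powersave_refcnt back to zero.
 */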

/*
 * Configure the initial HAL configuration values based on bus
 * specific parameters.
 *
 * Some PCI IDs and other information may need tweaking.
 *
 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
 * if BT antenna diversity isn't enabled.
 *
 * So, let's also figure out how to enable BT diversity for AR9485.
 */
static void
ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
{
        /* XXX TODO: only for PCI devices? */

        if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
                ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
                ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
                ah_config->ath_hal_min_gainidx = AH_TRUE;
                ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
                /* XXX low_rssi_thresh */
                /* XXX fast_div_bias */
                device_printf(sc->sc_dev, "configuring for %s\n",
                    (sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
                    "CUS198" : "CUS230");
        }

        if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
                device_printf(sc->sc_dev, "CUS217 card detected\n");

        if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
                device_printf(sc->sc_dev, "CUS252 card detected\n");

        if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
                device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");

        if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
                device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");

        if (sc->sc_pci_devinfo & ATH_PCI_BT_ANT_DIV)
                device_printf(sc->sc_dev,
                    "Bluetooth Antenna Diversity card detected\n");

        if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
                device_printf(sc->sc_dev, "Killer Wireless card detected\n");

#if 0
        /*
         * Some WB335 cards do not support antenna diversity.  Since
         * we use a hardcoded value for AR9565 instead of using the
         * EEPROM/OTP data, remove the combining feature from
         * the HW capabilities bitmap.
         */
        if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
                if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
                        pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
        }

        if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
                pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
                device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n");
        }
#endif

        if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
                ah_config->ath_hal_pcie_waen = 0x0040473b;
                device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
        }

#if 0
        if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
                ah->config.no_pll_pwrsave = true;
                device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
        }
#endif

}

/*
 * Attempt to fetch the MAC address from the kernel environment.
 *
 * Returns 0 with the address in macaddr if successful; -1 otherwise.
 */
static int
ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr)
{
        char devid_str[32];
        int local_mac = 0;
        char *local_macstr;

        /*
         * Fetch from the kenv rather than using hints.
         *
         * Hints would be nice but the transition to dynamic
         * hints/kenv doesn't happen early enough for this
         * to work reliably (eg on anything embedded.)
         */
        snprintf(devid_str, 32, "hint.%s.%d.macaddr",
            device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev));

        if ((local_macstr = kern_getenv(devid_str)) != NULL) {
                uint32_t tmpmac[ETHER_ADDR_LEN];
                int count;
                int i;

                /* Have a MAC address; should use it */
                device_printf(sc->sc_dev,
                    "Overriding MAC address from environment: '%s'\n",
                    local_macstr);

                /* Extract out the MAC address */
                count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
                    &tmpmac[0], &tmpmac[1],
                    &tmpmac[2], &tmpmac[3],
                    &tmpmac[4], &tmpmac[5]);
                if (count == 6) {
                        /* Valid! */
                        local_mac = 1;
                        for (i = 0; i < ETHER_ADDR_LEN; i++)
                                macaddr[i] = tmpmac[i];
                }
                /* Done! */
                freeenv(local_macstr);
                local_macstr = NULL;
        }

        if (local_mac)
                return (0);
        return (-1);
}
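
/*
 * For example, with a hypothetical address, the override can be set
 * from loader.conf (which lands in the kenv early enough):
 *
 *      hint.ath.0.macaddr="00:11:22:33:44:55"
 *
 * Any single separator character satisfies the "%x%*c" pattern above,
 * so colon- and dash-separated forms both parse.
 */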

#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
        (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
        HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ath_hal *ah = NULL;
        HAL_STATUS status;
        int error = 0, i;
        u_int wmodes;
        int rx_chainmask, tx_chainmask;
        HAL_OPS_CONFIG ah_config;

        DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);

        /*
         * Configure the initial configuration data.
         *
         * This is stuff that may be needed early during attach
         * rather than done via configuration calls later.
         */
        bzero(&ah_config, sizeof(ah_config));
        ath_setup_hal_config(sc, &ah_config);

        ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
            sc->sc_eepromdata, &ah_config, &status);
        if (ah == NULL) {
                device_printf(sc->sc_dev,
                    "unable to attach hardware; HAL status %u\n", status);
                error = ENXIO;
                goto bad;
        }
        sc->sc_ah = ah;
        sc->sc_invalid = 0;     /* ready to go, enable interrupt handling */
#ifdef ATH_DEBUG
        sc->sc_debug = ath_debug;
#endif

        /*
         * Setup the DMA/EDMA functions based on the current
         * hardware support.
         *
         * This is required before the descriptors are allocated.
         */
        if (ath_hal_hasedma(sc->sc_ah)) {
                sc->sc_isedma = 1;
                ath_recv_setup_edma(sc);
                ath_xmit_setup_edma(sc);
        } else {
                ath_recv_setup_legacy(sc);
                ath_xmit_setup_legacy(sc);
        }

        if (ath_hal_hasmybeacon(sc->sc_ah)) {
                sc->sc_do_mybeacon = 1;
        }

        /*
         * Check if the MAC has multi-rate retry support.
         * We do this by trying to setup a fake extended
         * descriptor.  MAC's that don't have support will
         * return false w/o doing anything.  MAC's that do
         * support it will return true w/o doing anything.
         */
        sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

        /*
         * Check if the device has hardware counters for PHY
         * errors.  If so we need to enable the MIB interrupt
         * so we can act on stat triggers.
         */
        if (ath_hal_hwphycounters(ah))
                sc->sc_needmib = 1;

        /*
         * Get the hardware key cache size.
         */
        sc->sc_keymax = ath_hal_keycachesize(ah);
        if (sc->sc_keymax > ATH_KEYMAX) {
                device_printf(sc->sc_dev,
                    "Warning, using only %u of %u key cache slots\n",
                    ATH_KEYMAX, sc->sc_keymax);
                sc->sc_keymax = ATH_KEYMAX;
        }
        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < sc->sc_keymax; i++)
                ath_hal_keyreset(ah, i);

        /*
         * Collect the default channel list.
         */
        error = ath_getchannels(sc);
        if (error != 0)
                goto bad;

        /*
         * Setup rate tables for all potential media types.
         */
        ath_rate_setup(sc, IEEE80211_MODE_11A);
        ath_rate_setup(sc, IEEE80211_MODE_11B);
        ath_rate_setup(sc, IEEE80211_MODE_11G);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
        ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
        ath_rate_setup(sc, IEEE80211_MODE_11NA);
        ath_rate_setup(sc, IEEE80211_MODE_11NG);
        ath_rate_setup(sc, IEEE80211_MODE_HALF);
        ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

        /* NB: setup here so ath_rate_update is happy */
        ath_setcurmode(sc, IEEE80211_MODE_11A);

        /*
         * Allocate TX descriptors and populate the lists.
         */
        error = ath_desc_alloc(sc);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "failed to allocate TX descriptors: %d\n", error);
                goto bad;
        }
        error = ath_txdma_setup(sc);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "failed to allocate TX descriptors: %d\n", error);
                goto bad;
        }

        /*
         * Allocate RX descriptors and populate the lists.
         */
        error = ath_rxdma_setup(sc);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "failed to allocate RX descriptors: %d\n", error);
                goto bad;
        }

        callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

        ATH_TXBUF_LOCK_INIT(sc);

        sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->sc_tq);
        taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->sc_dev));

        TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
        TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
        TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
        TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
        TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
        TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
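
        /*
         * NB: these tasks are the deferred half of the driver;
         * ath_intr() below does little more than classify the
         * interrupt and schedule the matching task on sc_tq, so the
         * bulk of RX, TX completion, beacon-miss and reset handling
         * runs in taskqueue context rather than at interrupt level.
         */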

        /*
         * Allocate hardware transmit queues: one queue for
         * beacon frames and one data queue for each QoS
         * priority.  Note that the hal handles resetting
         * these queues at the needed time.
         *
         * XXX PS-Poll
         */
        sc->sc_bhalq = ath_beaconq_setup(sc);
        if (sc->sc_bhalq == (u_int) -1) {
                device_printf(sc->sc_dev,
                    "unable to setup a beacon xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
        if (sc->sc_cabq == NULL) {
                device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n");
                error = EIO;
                goto bad2;
        }
        /* NB: insure BK queue is the lowest priority h/w queue */
        if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
                device_printf(sc->sc_dev,
                    "unable to setup xmit queue for %s traffic!\n",
                    ieee80211_wme_acnames[WME_AC_BK]);
                error = EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
            !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
            !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
                /*
                 * Not enough hardware tx queues to properly do WME;
                 * just punt and assign them all to the same h/w queue.
                 * We could do a better job of this if, for example,
                 * we allocate queues when we switch from station to
                 * AP mode.
                 */
                if (sc->sc_ac2q[WME_AC_VI] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
                if (sc->sc_ac2q[WME_AC_BE] != NULL)
                        ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
                sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
                sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
        }

        /*
         * Attach the TX completion function.
         *
         * The non-EDMA chips may have some special case optimisations;
         * this method gives everyone a chance to attach cleanly.
         */
        sc->sc_tx.xmit_attach_comp_func(sc);

        /*
         * Setup rate control.  Some rate control modules
         * call back to change the antenna state so expose
         * the necessary entry points.
         * XXX maybe belongs in struct ath_ratectrl?
         */
        sc->sc_setdefantenna = ath_setdefantenna;
        sc->sc_rc = ath_rate_attach(sc);
        if (sc->sc_rc == NULL) {
                error = EIO;
                goto bad2;
        }

        /* Attach DFS module */
        if (! ath_dfs_attach(sc)) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach DFS\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach spectral module */
        if (ath_spectral_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach spectral\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach bluetooth coexistence module */
        if (ath_btcoex_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach bluetooth coexistence\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Attach LNA diversity module */
        if (ath_lna_div_attach(sc) < 0) {
                device_printf(sc->sc_dev,
                    "%s: unable to attach LNA diversity\n", __func__);
                error = EIO;
                goto bad2;
        }

        /* Start DFS processing tasklet */
        TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

        /* Configure LED state */
        sc->sc_blinking = 0;
        sc->sc_ledstate = 1;
        sc->sc_ledon = 0;                       /* low true */
        sc->sc_ledidle = (2700*hz)/1000;        /* 2.7sec */
        callout_init(&sc->sc_ledtimer, 1);

        /*
         * Don't setup hardware-based blinking.
         *
         * Although some NICs may have this configured in the
         * default reset register values, the user may wish
         * to alter which pins have which function.
         *
         * The reference driver attaches the MAC network LED to GPIO1 and
         * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
         * NIC has these reversed.
         */
        sc->sc_hardled = (1 == 0);
        sc->sc_led_net_pin = -1;
        sc->sc_led_pwr_pin = -1;
        /*
         * Auto-enable soft led processing for IBM cards and for
         * 5211 minipci cards.  Users can also manually enable/disable
         * support with a sysctl.
         */
        sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
        ath_led_config(sc);
        ath_hal_setledstate(ah, HAL_LED_INIT);

        /* XXX not right but it's not used anywhere important */
        ic->ic_phytype = IEEE80211_T_OFDM;
        ic->ic_opmode = IEEE80211_M_STA;
        ic->ic_caps =
                  IEEE80211_C_STA               /* station mode */
                | IEEE80211_C_IBSS              /* ibss, nee adhoc, mode */
                | IEEE80211_C_HOSTAP            /* hostap mode */
                | IEEE80211_C_MONITOR           /* monitor mode */
                | IEEE80211_C_AHDEMO            /* adhoc demo mode */
                | IEEE80211_C_WDS               /* 4-address traffic works */
                | IEEE80211_C_MBSS              /* mesh point link mode */
                | IEEE80211_C_SHPREAMBLE        /* short preamble supported */
                | IEEE80211_C_SHSLOT            /* short slot time supported */
                | IEEE80211_C_WPA               /* capable of WPA1+WPA2 */
#ifndef ATH_ENABLE_11N
                | IEEE80211_C_BGSCAN            /* capable of bg scanning */
#endif
                | IEEE80211_C_TXFRAG            /* handle tx frags */
#ifdef ATH_ENABLE_DFS
                | IEEE80211_C_DFS               /* Enable radar detection */
#endif
                | IEEE80211_C_PMGT              /* Station side power mgmt */
                | IEEE80211_C_SWSLEEP
                ;
        /*
         * Query the hal to figure out h/w crypto support.
         */
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
        if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
                ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
                /*
                 * Check if h/w does the MIC and/or whether the
                 * separate key cache entries are required to
                 * handle both tx+rx MIC keys.
                 */
                if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
                        ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
                /*
                 * If the h/w supports storing tx+rx MIC keys
                 * in one cache slot automatically enable use.
                 */
                if (ath_hal_hastkipsplit(ah) ||
                    !ath_hal_settkipsplit(ah, AH_FALSE))
                        sc->sc_splitmic = 1;
                /*
                 * If the h/w can do TKIP MIC together with WME then
                 * we use it; otherwise we force the MIC to be done
                 * in software by the net80211 layer.
                 */
                if (ath_hal_haswmetkipmic(ah))
                        sc->sc_wmetkipmic = 1;
        }
        sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
        /*
         * Check for multicast key search support.
         */
        if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
            !ath_hal_getmcastkeysearch(sc->sc_ah)) {
                ath_hal_setmcastkeysearch(sc->sc_ah, 1);
        }
        sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
        /*
         * Mark key cache slots associated with global keys
         * as in use.  If we knew TKIP was not to be used we
         * could leave the +32, +64, and +32+64 slots free.
         */
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                setbit(sc->sc_keymap, i);
                setbit(sc->sc_keymap, i+64);
                if (sc->sc_splitmic) {
                        setbit(sc->sc_keymap, i+32);
                        setbit(sc->sc_keymap, i+32+64);
                }
        }
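
        /*
         * An illustrative sketch of the resulting layout for a global
         * key in slot i on a split-MIC part (the exact slot usage is
         * implemented in if_ath_keycache.c):
         *
         *      i               encrypt/decrypt key
         *      i+64            RX TKIP MIC key
         *      i+32, i+32+64   slots for the split TX TKIP MIC key
         *
         * which is why all four slots are reserved above when
         * sc_splitmic is set.
         */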

        /*
         * TPC support can be done either with a global cap or
         * per-packet support.  The latter is not available on
         * all parts.  We're a bit pedantic here as all parts
         * support a global cap.
         */
        if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
                ic->ic_caps |= IEEE80211_C_TXPMGT;

        /*
         * Mark WME capability only if we have sufficient
         * hardware queues to do proper priority scheduling.
         */
        if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
                ic->ic_caps |= IEEE80211_C_WME;
        /*
         * Check for misc other capabilities.
         */
        if (ath_hal_hasbursting(ah))
                ic->ic_caps |= IEEE80211_C_BURST;
        sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
        sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
        sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
        sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
        sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
        sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
        sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
        sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);

        if (ath_hal_hasfastframes(ah))
                ic->ic_caps |= IEEE80211_C_FF;
        wmodes = ath_hal_getwirelessmodes(ah);
        if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
                ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
        if (ath_hal_macversion(ah) > 0x78) {
                ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
                ic->ic_tdma_update = ath_tdma_update;
        }
#endif

        /*
         * TODO: enforce that at least this many frames are available
         * in the txbuf list before allowing data frames (raw or
         * otherwise) to be transmitted.
         */
        sc->sc_txq_data_minfree = 10;

        /*
         * Leave this as default to maintain legacy behaviour.
         * Shortening the cabq/mcastq may end up causing some
         * undesirable behaviour.
         */
        sc->sc_txq_mcastq_maxdepth = ath_txbuf;

        /*
         * How deep can the node software TX queue get whilst it's asleep.
         */
        sc->sc_txq_node_psq_maxdepth = 16;

        /*
         * Default the maximum node queue depth to 1/4 of the TX buffers,
         * or 64, whichever is larger.
         */
        sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

        /* Enable CABQ by default */
        sc->sc_cabq_enable = 1;

        /*
         * Allow the TX and RX chainmasks to be overridden by
         * environment variables and/or device.hints.
         *
         * This must be done early - before the hardware is
         * calibrated or before the 802.11n stream calculation
         * is done.
         */
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "rx_chainmask",
            &rx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
                    rx_chainmask);
                (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
        }
        if (resource_int_value(device_get_name(sc->sc_dev),
            device_get_unit(sc->sc_dev), "tx_chainmask",
            &tx_chainmask) == 0) {
                device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
                    tx_chainmask);
                (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
        }
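
        /*
         * For example, a 3x3 NIC could be restricted to a single chain
         * from device.hints or loader.conf (hypothetical values):
         *
         *      hint.ath.0.rx_chainmask="1"
         *      hint.ath.0.tx_chainmask="1"
         */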

        /*
         * Query the TX/RX chainmask configuration.
         *
         * This is only relevant for 11n devices.
         */
        ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
        ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

        /*
         * Disable MRR with protected frames by default.
         * Only 802.11n series NICs can handle this.
         */
        sc->sc_mrrprot = 0;     /* XXX should be a capability */

        /*
         * Query the enterprise mode information from the HAL.
         */
        if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
            &sc->sc_ent_cfg) == HAL_OK)
                sc->sc_use_ent = 1;

#ifdef ATH_ENABLE_11N
        /*
         * Query HT capabilities
         */
        if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
            (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
                uint32_t rxs, txs;
                uint32_t ldpc;

                device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

                sc->sc_mrrprot = 1;     /* XXX should be a capability */

                ic->ic_htcaps = IEEE80211_HTC_HT        /* HT operation */
                            | IEEE80211_HTC_AMPDU       /* A-MPDU tx/rx */
                            | IEEE80211_HTC_AMSDU       /* A-MSDU tx/rx */
                            | IEEE80211_HTCAP_MAXAMSDU_3839
                                                        /* max A-MSDU length */
                            | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */

                /*
                 * Enable short-GI for HT20 only if the hardware
                 * advertises support.
                 * Notably, anything earlier than the AR9287 doesn't.
                 */
                if ((ath_hal_getcapability(ah,
                    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
                    (wmodes & HAL_MODE_HT20)) {
                        device_printf(sc->sc_dev,
                            "[HT] enabling short-GI in 20MHz mode\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
                }

                if (wmodes & HAL_MODE_HT40)
                        ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
                            | IEEE80211_HTCAP_SHORTGI40;

                /*
                 * TX/RX streams need to be taken into account when
                 * negotiating which MCS rates it'll receive and
                 * what MCS rates are available for TX.
                 */
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
                (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
                ic->ic_txstream = txs;
                ic->ic_rxstream = rxs;

                /*
                 * Setup TX and RX STBC based on what the HAL allows and
                 * the currently configured chainmask set.
                 * Ie - don't enable STBC TX if only one chain is enabled.
                 * STBC RX is fine on a single RX chain; it just won't
                 * provide any real benefit.
                 */
                if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
                    NULL) == HAL_OK) {
                        sc->sc_rx_stbc = 1;
                        device_printf(sc->sc_dev,
                            "[HT] 1 stream STBC receive enabled\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
                }
                if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
                    NULL) == HAL_OK) {
                        sc->sc_tx_stbc = 1;
                        device_printf(sc->sc_dev,
                            "[HT] 1 stream STBC transmit enabled\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
                }

                (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
                    &sc->sc_rts_aggr_limit);
                if (sc->sc_rts_aggr_limit != (64 * 1024))
                        device_printf(sc->sc_dev,
                            "[HT] RTS aggregates limited to %d KiB\n",
                            sc->sc_rts_aggr_limit / 1024);

                /*
                 * LDPC
                 */
                if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc))
                    == HAL_OK && (ldpc == 1)) {
                        sc->sc_has_ldpc = 1;
                        device_printf(sc->sc_dev,
                            "[HT] LDPC transmit/receive enabled\n");
                        ic->ic_htcaps |= IEEE80211_HTCAP_LDPC;
                }

                device_printf(sc->sc_dev,
                    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
        }
#endif
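
        /*
         * By way of example: a 2x2 part reports HAL_CAP_STREAMS of 2
         * for both TX and RX above, so net80211 will negotiate MCS 0-15
         * with peers, while a 1x1 part is limited to MCS 0-7.
         */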

        /*
         * Initial aggregation settings.
         */
        sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
        sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
        sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
        sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
        sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
        sc->sc_delim_min_pad = 0;

        /*
         * Check if the hardware requires PCI register serialisation.
         * Some of the Owl based MACs require this.
         */
        if (mp_ncpus > 1 &&
            ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
            0, NULL) == HAL_OK) {
                sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
                device_printf(sc->sc_dev,
                    "Enabling register serialisation\n");
        }

        /*
         * Initialise the deferred completed RX buffer list.
         */
        TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
        TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);

        /*
         * Indicate we need the 802.11 header padded to a
         * 32-bit boundary for 4-address and QoS frames.
         */
        ic->ic_flags |= IEEE80211_F_DATAPAD;

        /*
         * Query the hal about antenna support.
         */
        sc->sc_defant = ath_hal_getdefantenna(ah);

        /*
         * Not all chips have the VEOL support we want to
         * use with IBSS beacons; check here for it.
         */
        sc->sc_hasveol = ath_hal_hasveol(ah);

        /* get mac address from kenv first, then hardware */
        if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) {
                /* Tell the HAL now about the new MAC */
                ath_hal_setmac(ah, ic->ic_macaddr);
        } else {
                ath_hal_getmac(ah, ic->ic_macaddr);
        }

        if (sc->sc_hasbmask)
                ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

        /* NB: used to size node table key mapping array */
        ic->ic_max_keyix = sc->sc_keymax;
        /* call MI attach routine. */
        ieee80211_ifattach(ic);
        ic->ic_setregdomain = ath_setregdomain;
        ic->ic_getradiocaps = ath_getradiocaps;
        sc->sc_opmode = HAL_M_STA;

        /* override default methods */
        ic->ic_ioctl = ath_ioctl;
        ic->ic_parent = ath_parent;
        ic->ic_transmit = ath_transmit;
        ic->ic_newassoc = ath_newassoc;
        ic->ic_updateslot = ath_updateslot;
        ic->ic_wme.wme_update = ath_wme_update;
        ic->ic_vap_create = ath_vap_create;
        ic->ic_vap_delete = ath_vap_delete;
        ic->ic_raw_xmit = ath_raw_xmit;
        ic->ic_update_mcast = ath_update_mcast;
        ic->ic_update_promisc = ath_update_promisc;
        ic->ic_node_alloc = ath_node_alloc;
        sc->sc_node_free = ic->ic_node_free;
        ic->ic_node_free = ath_node_free;
        sc->sc_node_cleanup = ic->ic_node_cleanup;
        ic->ic_node_cleanup = ath_node_cleanup;
        ic->ic_node_getsignal = ath_node_getsignal;
        ic->ic_scan_start = ath_scan_start;
        ic->ic_scan_end = ath_scan_end;
        ic->ic_set_channel = ath_set_channel;
#ifdef ATH_ENABLE_11N
        /* 802.11n specific - but just override anyway */
        sc->sc_addba_request = ic->ic_addba_request;
        sc->sc_addba_response = ic->ic_addba_response;
        sc->sc_addba_stop = ic->ic_addba_stop;
        sc->sc_bar_response = ic->ic_bar_response;
        sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

        ic->ic_addba_request = ath_addba_request;
        ic->ic_addba_response = ath_addba_response;
        ic->ic_addba_response_timeout = ath_addba_response_timeout;
        ic->ic_addba_stop = ath_addba_stop;
        ic->ic_bar_response = ath_bar_response;

        ic->ic_update_chw = ath_update_chw;
#endif  /* ATH_ENABLE_11N */
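
        /*
         * NB: the A-MPDU assignments above follow the driver's usual
         * "save and chain" pattern: the net80211 default method is
         * stashed in the softc (eg sc_addba_request) and the
         * replacement (eg ath_addba_request) does its driver-specific
         * work around a call to the saved method.
         */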

#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
        /*
         * There's one vendor bitmap entry in the RX radiotap
         * header; make sure that's taken into account.
         */
        ieee80211_radiotap_attachv(ic,
            &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
            ATH_TX_RADIOTAP_PRESENT,
            &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
            ATH_RX_RADIOTAP_PRESENT);
#else
        /*
         * No vendor bitmap/extensions are present.
         */
        ieee80211_radiotap_attach(ic,
            &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
            ATH_TX_RADIOTAP_PRESENT,
            &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
            ATH_RX_RADIOTAP_PRESENT);
#endif  /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

        /*
         * Setup the ALQ logging if required
         */
#ifdef ATH_DEBUG_ALQ
        if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
        if_ath_alq_setcfg(&sc->sc_alq,
            sc->sc_ah->ah_macVersion,
            sc->sc_ah->ah_macRev,
            sc->sc_ah->ah_phyRev,
            sc->sc_ah->ah_magic);
#endif

        /*
         * Setup dynamic sysctl's now that country code and
         * regdomain are available from the hal.
         */
        ath_sysctlattach(sc);
        ath_sysctl_stats_attach(sc);
        ath_sysctl_hal_attach(sc);

        if (bootverbose)
                ieee80211_announce(ic);
        ath_announce(sc);

        /*
         * Put it to sleep for now.
         */
        ATH_LOCK(sc);
        ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
        ATH_UNLOCK(sc);

        return 0;
bad2:
        ath_tx_cleanup(sc);
        ath_desc_free(sc);
        ath_txdma_teardown(sc);
        ath_rxdma_teardown(sc);
bad:
        if (ah)
                ath_hal_detach(ah);
        sc->sc_invalid = 1;
        return error;
}

int
ath_detach(struct ath_softc *sc)
{

        /*
         * NB: the order of these is important:
         * o stop the chip so no more interrupts will fire
         * o call the 802.11 layer before detaching the hal to
         *   insure callbacks into the driver to delete global
         *   key cache entries can be handled
         * o free the taskqueue which drains any pending tasks
         * o reclaim the tx queue data structures after calling
         *   the 802.11 layer as we'll get called back to reclaim
         *   node state and potentially want to use them
         * o to cleanup the tx queues the hal is called, so detach
         *   it last
         * Other than that, it's straightforward...
         */

        /*
         * XXX Wake the hardware up first.  ath_stop() will still
         * wake it up first, but I'd rather do it here just to
         * ensure it's awake.
         */
        ATH_LOCK(sc);
        ath_power_set_power_state(sc, HAL_PM_AWAKE);
        ath_power_setpower(sc, HAL_PM_AWAKE);

        /*
         * Stop things cleanly.
         */
        ath_stop(sc);
        ATH_UNLOCK(sc);

        ieee80211_ifdetach(&sc->sc_ic);
        taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
        if (sc->sc_tx99 != NULL)
                sc->sc_tx99->detach(sc->sc_tx99);
#endif
        ath_rate_detach(sc->sc_rc);
#ifdef ATH_DEBUG_ALQ
        if_ath_alq_tidyup(&sc->sc_alq);
#endif
        ath_lna_div_detach(sc);
        ath_btcoex_detach(sc);
        ath_spectral_detach(sc);
        ath_dfs_detach(sc);
        ath_desc_free(sc);
        ath_txdma_teardown(sc);
        ath_rxdma_teardown(sc);
        ath_tx_cleanup(sc);
        ath_hal_detach(sc->sc_ah);      /* NB: sets chip in full sleep */

        return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
        int i;

        if (clone && sc->sc_hasbmask) {
                /* NB: we only do this if h/w supports multiple bssid */
                for (i = 0; i < 8; i++)
                        if ((sc->sc_bssidmask & (1<<i)) == 0)
                                break;
                if (i != 0)
                        mac[0] |= (i << 2)|0x2;
        } else
                i = 0;
        sc->sc_bssidmask |= 1<<i;
        sc->sc_hwbssidmask[0] &= ~mac[0];
        if (i == 0)
                sc->sc_nbssid0++;
}

static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
        int i = mac[0] >> 2;
        uint8_t mask;

        if (i != 0 || --sc->sc_nbssid0 == 0) {
                sc->sc_bssidmask &= ~(1<<i);
                /* recalculate bssid mask from remaining addresses */
                mask = 0xff;
                for (i = 1; i < 8; i++)
                        if (sc->sc_bssidmask & (1<<i))
                                mask &= ~((i<<2)|0x2);
                sc->sc_hwbssidmask[0] |= mask;
        }
}
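
/*
 * A worked example with a hypothetical EEPROM address of
 * 00:03:7f:aa:bb:cc: if slot 1 is the first free bssid slot,
 * assign_address() ORs (1 << 2) | 0x2 into mac[0], yielding the
 * locally administered address 06:03:7f:aa:bb:cc, and clears bits
 * 1 and 2 in sc_hwbssidmask[0] so the hardware ignores those bits
 * when matching the BSSID.
 */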

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
        u_int slot, free;

        free = 0;
        for (slot = 0; slot < ATH_BCBUF; slot++)
                if (sc->sc_bslot[slot] == NULL) {
                        if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
                            sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
                                return slot;
                        free = slot;
                        /* NB: keep looking for a double slot */
                }
        return free;
}

static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
        struct ath_softc *sc = ic->ic_softc;
        struct ath_vap *avp;
        struct ieee80211vap *vap;
        uint8_t mac[IEEE80211_ADDR_LEN];
        int needbeacon, error;
        enum ieee80211_opmode ic_opmode;

        avp = malloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO);
        needbeacon = 0;
        IEEE80211_ADDR_COPY(mac, mac0);

        ATH_LOCK(sc);
        ic_opmode = opmode;             /* default to opmode of new vap */
        switch (opmode) {
        case IEEE80211_M_STA:
                if (sc->sc_nstavaps != 0) {     /* XXX only 1 for now */
                        device_printf(sc->sc_dev, "only 1 sta vap supported\n");
                        goto bad;
                }
                if (sc->sc_nvaps) {
                        /*
                         * With multiple vaps we must fall back
                         * to s/w beacon miss handling.
                         */
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
                if (flags & IEEE80211_CLONE_NOBEACONS) {
                        /*
                         * Station mode w/o beacons are implemented w/ AP mode.
                         */
                        ic_opmode = IEEE80211_M_HOSTAP;
                }
                break;
        case IEEE80211_M_IBSS:
                if (sc->sc_nvaps != 0) {        /* XXX only 1 for now */
                        device_printf(sc->sc_dev,
                            "only 1 ibss vap supported\n");
                        goto bad;
                }
                needbeacon = 1;
                break;
        case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
                if (flags & IEEE80211_CLONE_TDMA) {
                        if (sc->sc_nvaps != 0) {
                                device_printf(sc->sc_dev,
                                    "only 1 tdma vap supported\n");
                                goto bad;
                        }
                        needbeacon = 1;
                        flags |= IEEE80211_CLONE_NOBEACONS;
                }
                /* fall thru... */
#endif
        case IEEE80211_M_MONITOR:
                if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
                        /*
                         * Adopt existing mode.  Adding a monitor or ahdemo
                         * vap to an existing configuration is of dubious
                         * value but should be ok.
                         */
                        /* XXX not right for monitor mode */
                        ic_opmode = ic->ic_opmode;
                }
                break;
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
                needbeacon = 1;
                break;
        case IEEE80211_M_WDS:
                if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
                        device_printf(sc->sc_dev,
                            "wds not supported in sta mode\n");
                        goto bad;
                }
                /*
                 * Silently remove any request for a unique
                 * bssid; WDS vap's always share the local
                 * mac address.
                 */
                flags &= ~IEEE80211_CLONE_BSSID;
                if (sc->sc_nvaps == 0)
                        ic_opmode = IEEE80211_M_HOSTAP;
                else
                        ic_opmode = ic->ic_opmode;
                break;
        default:
                device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
                goto bad;
        }
        /*
         * Check that a beacon buffer is available; the code below assumes it.
         */
        if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
                device_printf(sc->sc_dev, "no beacon buffer available\n");
                goto bad;
        }

        /* STA, AHDEMO? */
        if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
                assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
                ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
        }

        vap = &avp->av_vap;
        /* XXX can't hold mutex across if_alloc */
        ATH_UNLOCK(sc);
        error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
        ATH_LOCK(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "%s: error %d creating vap\n",
                    __func__, error);
                goto bad2;
        }

        /* h/w crypto support */
        vap->iv_key_alloc = ath_key_alloc;
        vap->iv_key_delete = ath_key_delete;
        vap->iv_key_set = ath_key_set;
        vap->iv_key_update_begin = ath_key_update_begin;
        vap->iv_key_update_end = ath_key_update_end;

        /* override various methods */
        avp->av_recv_mgmt = vap->iv_recv_mgmt;
        vap->iv_recv_mgmt = ath_recv_mgmt;
        vap->iv_reset = ath_reset_vap;
        vap->iv_update_beacon = ath_beacon_update;
        avp->av_newstate = vap->iv_newstate;
        vap->iv_newstate = ath_newstate;
        avp->av_bmiss = vap->iv_bmiss;
        vap->iv_bmiss = ath_bmiss_vap;

        avp->av_node_ps = vap->iv_node_ps;
        vap->iv_node_ps = ath_node_powersave;

        avp->av_set_tim = vap->iv_set_tim;
        vap->iv_set_tim = ath_node_set_tim;

        avp->av_recv_pspoll = vap->iv_recv_pspoll;
        vap->iv_recv_pspoll = ath_node_recv_pspoll;

        /* Set default parameters */

        /*
         * Anything earlier than some AR9300 series MACs don't
         * support a smaller MPDU density.
         */
        vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
        /*
         * All NICs can handle the maximum size, however
         * AR5416 based MACs can only TX aggregates w/ RTS
         * protection when the total aggregate size is <= 8k.
         * However, for now that's enforced by the TX path.
         */
        vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

        avp->av_bslot = -1;
        if (needbeacon) {
                /*
                 * Allocate beacon state and setup the q for buffered
                 * multicast frames.  We know a beacon buffer is
                 * available because we checked above.
                 */
                avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
                TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
                if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
                        /*
                         * Assign the vap to a beacon xmit slot.
                         * As above, this cannot fail to find a free one.
                         */
                        avp->av_bslot = assign_bslot(sc);
                        KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
                            ("beacon slot %u not empty", avp->av_bslot));
                        sc->sc_bslot[avp->av_bslot] = vap;
                        sc->sc_nbcnvaps++;
                }
                if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
                        /*
                         * Multiple vaps are to transmit beacons and we
                         * have h/w support for TSF adjusting; enable
                         * use of staggered beacons.
                         */
                        sc->sc_stagbeacons = 1;
                }
                ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
        }

        ic->ic_opmode = ic_opmode;
        if (opmode != IEEE80211_M_WDS) {
                sc->sc_nvaps++;
                if (opmode == IEEE80211_M_STA)
                        sc->sc_nstavaps++;
                if (opmode == IEEE80211_M_MBSS)
                        sc->sc_nmeshvaps++;
        }
        switch (ic_opmode) {
        case IEEE80211_M_IBSS:
                sc->sc_opmode = HAL_M_IBSS;
                break;
        case IEEE80211_M_STA:
                sc->sc_opmode = HAL_M_STA;
                break;
        case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
                if (vap->iv_caps & IEEE80211_C_TDMA) {
                        sc->sc_tdma = 1;
                        /* NB: disable tsf adjust */
                        sc->sc_stagbeacons = 0;
                }
                /*
                 * NB: adhoc demo mode is a pseudo mode; to the hal it's
                 * just ap mode.
                 */
                /* fall thru... */
#endif
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_MBSS:
                sc->sc_opmode = HAL_M_HOSTAP;
                break;
        case IEEE80211_M_MONITOR:
                sc->sc_opmode = HAL_M_MONITOR;
                break;
        default:
                /* XXX should not happen */
                break;
        }
        if (sc->sc_hastsfadd) {
                /*
                 * Configure whether or not TSF adjust should be done.
                 */
                ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
        }
        if (flags & IEEE80211_CLONE_NOBEACONS) {
                /*
                 * Enable s/w beacon miss handling.
                 */
                sc->sc_swbmiss = 1;
        }
        ATH_UNLOCK(sc);

        /* complete setup */
        ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status,
            mac);
        return vap;
bad2:
        reclaim_address(sc, mac);
        ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
        free(avp, M_80211_VAP);
        ATH_UNLOCK(sc);
        return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
        struct ieee80211com *ic = vap->iv_ic;
        struct ath_softc *sc = ic->ic_softc;
        struct ath_hal *ah = sc->sc_ah;
        struct ath_vap *avp = ATH_VAP(vap);

        ATH_LOCK(sc);
        ath_power_set_power_state(sc, HAL_PM_AWAKE);
        ATH_UNLOCK(sc);

        DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
        if (sc->sc_running) {
                /*
                 * Quiesce the hardware while we remove the vap.  In
                 * particular we need to reclaim all references to
                 * the vap state by any frames pending on the tx queues.
                 */
                ath_hal_intrset(ah, 0);         /* disable interrupts */
                /* XXX Do all frames from all vaps/nodes need draining here? */
                ath_stoprecv(sc, 1);            /* stop recv side */
                ath_draintxq(sc, ATH_RESET_DEFAULT);    /* stop hw xmit side */
        }

        /* .. leave the hardware awake for now. */

        ieee80211_vap_detach(vap);

        /*
         * XXX Danger Will Robinson! Danger!
         *
         * Because ieee80211_vap_detach() can queue a frame (the station
         * disassociate message?) after we've drained the TXQ and
         * flushed the software TXQ, we will end up with a frame queued
         * to a node whose vap is about to be freed.
         *
         * To work around this, flush the hardware/software again.
         * This may be racy - the ath task may be running and the packet
         * may be being scheduled between sw->hw txq.  Tsk.
         *
         * TODO: figure out why a new node gets allocated somewhere around
         * here (after the ath_tx_swq() call; and after an ath_stop()
         * call!)
         */

        ath_draintxq(sc, ATH_RESET_DEFAULT);

        ATH_LOCK(sc);
        /*
         * Reclaim beacon state.  Note this must be done before
         * the vap instance is reclaimed as we may have a reference
         * to it in the buffer for the beacon frame.
         */
        if (avp->av_bcbuf != NULL) {
                if (avp->av_bslot != -1) {
                        sc->sc_bslot[avp->av_bslot] = NULL;
                        sc->sc_nbcnvaps--;
                }
                ath_beacon_return(sc, avp->av_bcbuf);
                avp->av_bcbuf = NULL;
                if (sc->sc_nbcnvaps == 0) {
                        sc->sc_stagbeacons = 0;
                        if (sc->sc_hastsfadd)
                                ath_hal_settsfadjust(sc->sc_ah, 0);
                }
                /*
                 * Reclaim any pending mcast frames for the vap.
                 */
                ath_tx_draintxq(sc, &avp->av_mcastq);
        }
        /*
         * Update bookkeeping.
         */
        if (vap->iv_opmode == IEEE80211_M_STA) {
                sc->sc_nstavaps--;
                if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
                        sc->sc_swbmiss = 0;
        } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
            vap->iv_opmode == IEEE80211_M_MBSS) {
                reclaim_address(sc, vap->iv_myaddr);
                ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
                if (vap->iv_opmode == IEEE80211_M_MBSS)
                        sc->sc_nmeshvaps--;
        }
        if (vap->iv_opmode != IEEE80211_M_WDS)
                sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
        /* TDMA operation ceases when the last vap is destroyed */
        if (sc->sc_tdma && sc->sc_nvaps == 0) {
                sc->sc_tdma = 0;
                sc->sc_swbmiss = 0;
        }
#endif
        free(avp, M_80211_VAP);

        if (sc->sc_running) {
                /*
                 * Restart rx+tx machines if still running (RUNNING will
                 * be reset if we just destroyed the last vap).
                 */
                if (ath_startrecv(sc) != 0)
                        device_printf(sc->sc_dev,
                            "%s: unable to restart recv logic\n", __func__);
                if (sc->sc_beacons) {           /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
                        if (sc->sc_tdma)
                                ath_tdma_config(sc, NULL);
                        else
#endif
                                ath_beacon_config(sc, NULL);
                }
                ath_hal_intrset(ah, sc->sc_imask);
        }

        /* Ok, let the hardware sleep. */
        ath_power_restore_power_state(sc);
        ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;

        sc->sc_resume_up = ic->ic_nrunning != 0;

        ieee80211_suspend_all(ic);
        /*
         * NB: don't worry about putting the chip in low power
         * mode; pci will power off our socket on suspend and
         * CardBus detaches the device.
         *
         * XXX TODO: well, that's great, except for non-cardbus
         * devices!
         */

        /*
         * XXX This doesn't wait until all pending taskqueue
         * items and parallel transmit/receive/other threads
         * have finished!
         */
        ath_hal_intrset(sc->sc_ah, 0);
        taskqueue_block(sc->sc_tq);

        ATH_LOCK(sc);
        callout_stop(&sc->sc_cal_ch);
        ATH_UNLOCK(sc);

        /*
         * XXX ensure sc_invalid is 1
         */

        /* Disable the PCIe PHY, complete with workarounds */
        ath_hal_enablepcie(sc->sc_ah, 1, 1);
}

/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
1868 */
1869static void
1870ath_reset_keycache(struct ath_softc *sc)
1871{
1872 struct ieee80211com *ic = &sc->sc_ic;
1873 struct ath_hal *ah = sc->sc_ah;
1874 int i;
1875
1876 ATH_LOCK(sc);
1877 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1878 for (i = 0; i < sc->sc_keymax; i++)
1879 ath_hal_keyreset(ah, i);
1880 ath_power_restore_power_state(sc);
1881 ATH_UNLOCK(sc);
1882 ieee80211_crypto_reload_keys(ic);
1883}
1884
1885/*
1886 * Fetch the current chainmask configuration based on the current
1887 * operating channel and options.
1888 */
1889static void
1890ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
1891{
1892
1893 /*
1894 * Set TX chainmask to the currently configured chainmask;
1895 * the TX chainmask depends upon the current operating mode.
1896 */
1897 sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
1898 if (IEEE80211_IS_CHAN_HT(chan)) {
1899 sc->sc_cur_txchainmask = sc->sc_txchainmask;
1900 } else {
1901 sc->sc_cur_txchainmask = 1;
1902 }
1903
1904 DPRINTF(sc, ATH_DEBUG_RESET,
1905 "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
1906 __func__,
1907 sc->sc_cur_txchainmask,
1908 sc->sc_cur_rxchainmask);
1909}
1910
1911void
1912ath_resume(struct ath_softc *sc)
1913{
1914 struct ieee80211com *ic = &sc->sc_ic;
1915 struct ath_hal *ah = sc->sc_ah;
1916 HAL_STATUS status;
1917
1918 ath_hal_enablepcie(ah, 0, 0);
1919
1920 /*
1921 * Must reset the chip before we reload the
1922 * keycache as we were powered down on suspend.
1923 */
1924 ath_update_chainmasks(sc,
1925 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
1926 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
1927 sc->sc_cur_rxchainmask);
1928
1929 /* Ensure we set the current power state to on */
1930 ATH_LOCK(sc);
1931 ath_power_setselfgen(sc, HAL_PM_AWAKE);
1932 ath_power_set_power_state(sc, HAL_PM_AWAKE);
1933 ath_power_setpower(sc, HAL_PM_AWAKE);
1934 ATH_UNLOCK(sc);
1935
1936 ath_hal_reset(ah, sc->sc_opmode,
1937 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1938 AH_FALSE, HAL_RESET_NORMAL, &status);
1939 ath_reset_keycache(sc);
1940
1941 ATH_RX_LOCK(sc);
1942 sc->sc_rx_stopped = 1;
1943 sc->sc_rx_resetted = 1;
1944 ATH_RX_UNLOCK(sc);
1945
1946 /* Let DFS at it in case it's a DFS channel */
1947 ath_dfs_radar_enable(sc, ic->ic_curchan);
1948
1949 /* Let spectral at it in case spectral is enabled */
1950 ath_spectral_enable(sc, ic->ic_curchan);
1951
1952 /*
1953 * Let bluetooth coexistence at it in case it's needed for this channel
1954 */
1955 ath_btcoex_enable(sc, ic->ic_curchan);
1956
1957 /*
1958 * If we're doing TDMA, enforce the TXOP limitation for chips that
1959 * support it.
1960 */
1961 if (sc->sc_hasenforcetxop && sc->sc_tdma)
1962 ath_hal_setenforcetxop(sc->sc_ah, 1);
1963 else
1964 ath_hal_setenforcetxop(sc->sc_ah, 0);
1965
1966 /* Restore the LED configuration */
1967 ath_led_config(sc);
1968 ath_hal_setledstate(ah, HAL_LED_INIT);
1969
1970 if (sc->sc_resume_up)
1971 ieee80211_resume_all(ic);
1972
1973 ATH_LOCK(sc);
1974 ath_power_restore_power_state(sc);
1975 ATH_UNLOCK(sc);
1976
1977 /* XXX beacons ? */
1978}
1979
1980void
1981ath_shutdown(struct ath_softc *sc)
1982{
1983
1984 ATH_LOCK(sc);
1985 ath_stop(sc);
1986 ATH_UNLOCK(sc);
1987 /* NB: no point powering down chip as we're about to reboot */
1988}
1989
1990/*
1991 * Interrupt handler. Most of the actual processing is deferred.
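 * "Deferred" means the ISR mostly just latches status and kicks
 * taskqueue work; as a rough map of what is handed where (see the
 * dispatch below):
 *
 *	HAL_INT_FATAL		-> sc->sc_fataltask
 *	HAL_INT_BMISS		-> sc->sc_bmisstask
 *	HAL_INT_TX		-> sc->sc_txtask
 *	HAL_INT_RX/RXHP/RXLP	-> sc->sc_rx.recv_sched(sc, 1)
 *
 * The notable exception is HAL_INT_SWBA (beacon transmission), which
 * is handled inline because deferring it can't meet beacon timing
 * under load.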
1992 */
1993void
1994ath_intr(void *arg)
1995{
1996 struct ath_softc *sc = arg;
1997 struct ath_hal *ah = sc->sc_ah;
1998 HAL_INT status = 0;
1999 uint32_t txqs;
2000
2001 /*
2002 * If we're inside a reset path, just print a warning and
2003 * clear the ISR. The reset routine will finish it for us.
2004 */
2005 ATH_PCU_LOCK(sc);
2006 if (sc->sc_inreset_cnt) {
2007 HAL_INT status;
2008 ath_hal_getisr(ah, &status); /* clear ISR */
2009 ath_hal_intrset(ah, 0); /* disable further intr's */
2010 DPRINTF(sc, ATH_DEBUG_ANY,
2011 "%s: in reset, ignoring: status=0x%x\n",
2012 __func__, status);
2013 ATH_PCU_UNLOCK(sc);
2014 return;
2015 }
2016
2017 if (sc->sc_invalid) {
2018 /*
2019 * The hardware is not ready/present, don't touch anything.
2020 * Note this can happen early on if the IRQ is shared.
2021 */
2022 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
2023 ATH_PCU_UNLOCK(sc);
2024 return;
2025 }
2026 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
2027 ATH_PCU_UNLOCK(sc);
2028 return;
2029 }
2030
2031 ATH_LOCK(sc);
2032 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2033 ATH_UNLOCK(sc);
2034
2035 if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) {
2036 HAL_INT status;
2037
2038 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n",
2039 __func__, sc->sc_ic.ic_nrunning, sc->sc_running);
2040 ath_hal_getisr(ah, &status); /* clear ISR */
2041 ath_hal_intrset(ah, 0); /* disable further intr's */
2042 ATH_PCU_UNLOCK(sc);
2043
2044 ATH_LOCK(sc);
2045 ath_power_restore_power_state(sc);
2046 ATH_UNLOCK(sc);
2047 return;
2048 }
2049
2050 /*
2051 * Figure out the reason(s) for the interrupt. Note
2052 * that the hal returns a pseudo-ISR that may include
2053 * bits we haven't explicitly enabled so we mask the
2054 * value to ensure we only process bits we requested.
2055 */
2056 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
2057 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
2058 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
2059#ifdef ATH_DEBUG_ALQ
2060 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
2061 ah->ah_syncstate);
2062#endif /* ATH_DEBUG_ALQ */
2063#ifdef ATH_KTR_INTR_DEBUG
2064 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
2065 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
2066 ah->ah_intrstate[0],
2067 ah->ah_intrstate[1],
2068 ah->ah_intrstate[2],
2069 ah->ah_intrstate[3],
2070 ah->ah_intrstate[6]);
2071#endif
2072
2073 /* Squirrel away SYNC interrupt debugging */
2074 if (ah->ah_syncstate != 0) {
2075 int i;
2076 for (i = 0; i < 32; i++)
2077 if (ah->ah_syncstate & (1 << i))
2078 sc->sc_intr_stats.sync_intr[i]++;
2079 }
2080
2081 status &= sc->sc_imask; /* discard unasked for bits */
2082
2083 /* Short-circuit un-handled interrupts */
2084 if (status == 0x0) {
2085 ATH_PCU_UNLOCK(sc);
2086
2087 ATH_LOCK(sc);
2088 ath_power_restore_power_state(sc);
2089 ATH_UNLOCK(sc);
2090
2091 return;
2092 }
2093
2094 /*
2095 * Take a note that we're inside the interrupt handler, so
2096 * the reset routines know to wait.
2097 */
2098 sc->sc_intr_cnt++;
2099 ATH_PCU_UNLOCK(sc);
2100
2101 /*
2102 * Handle the interrupt. We won't run concurrent with the reset
2103 * or channel change routines as they'll wait for sc_intr_cnt
2104 * to be 0 before continuing.
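 *
 * (The other half of that handshake lives in ath_txrx_stop_locked();
 * condensed, with both counters covered by ATH_PCU_LOCK:
 *
 *	interrupt path (here):
 *		sc->sc_intr_cnt++;
 *		... dispatch work ...
 *		sc->sc_intr_cnt--;
 *
 *	reset path:
 *		while (rx/tx/txstart/intr counters != 0)
 *			msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
 *			    msecs_to_ticks(10));
 *
 * and once a reset has bumped sc_inreset_cnt, this handler bails out
 * early, see the top of the function, so the counters can drain.)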
2105 */ 2106 if (status & HAL_INT_FATAL) { 2107 sc->sc_stats.ast_hardware++; 2108 ath_hal_intrset(ah, 0); /* disable intr's until reset */ 2109 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); 2110 } else { 2111 if (status & HAL_INT_SWBA) { 2112 /* 2113 * Software beacon alert--time to send a beacon. 2114 * Handle beacon transmission directly; deferring 2115 * this is too slow to meet timing constraints 2116 * under load. 2117 */ 2118#ifdef IEEE80211_SUPPORT_TDMA 2119 if (sc->sc_tdma) { 2120 if (sc->sc_tdmaswba == 0) { 2121 struct ieee80211com *ic = &sc->sc_ic; 2122 struct ieee80211vap *vap = 2123 TAILQ_FIRST(&ic->ic_vaps); 2124 ath_tdma_beacon_send(sc, vap); 2125 sc->sc_tdmaswba = 2126 vap->iv_tdma->tdma_bintval; 2127 } else 2128 sc->sc_tdmaswba--; 2129 } else 2130#endif 2131 { 2132 ath_beacon_proc(sc, 0); 2133#ifdef IEEE80211_SUPPORT_SUPERG 2134 /* 2135 * Schedule the rx taskq in case there's no 2136 * traffic so any frames held on the staging 2137 * queue are aged and potentially flushed. 2138 */ 2139 sc->sc_rx.recv_sched(sc, 1); 2140#endif 2141 } 2142 } 2143 if (status & HAL_INT_RXEOL) { 2144 int imask; 2145 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); 2146 if (! sc->sc_isedma) { 2147 ATH_PCU_LOCK(sc); 2148 /* 2149 * NB: the hardware should re-read the link when 2150 * RXE bit is written, but it doesn't work at 2151 * least on older hardware revs. 2152 */ 2153 sc->sc_stats.ast_rxeol++; 2154 /* 2155 * Disable RXEOL/RXORN - prevent an interrupt 2156 * storm until the PCU logic can be reset. 2157 * In case the interface is reset some other 2158 * way before "sc_kickpcu" is called, don't 2159 * modify sc_imask - that way if it is reset 2160 * by a call to ath_reset() somehow, the 2161 * interrupt mask will be correctly reprogrammed. 2162 */ 2163 imask = sc->sc_imask; 2164 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); 2165 ath_hal_intrset(ah, imask); 2166 /* 2167 * Only blank sc_rxlink if we've not yet kicked 2168 * the PCU. 2169 * 2170 * This isn't entirely correct - the correct solution 2171 * would be to have a PCU lock and engage that for 2172 * the duration of the PCU fiddling; which would include 2173 * running the RX process. Otherwise we could end up 2174 * messing up the RX descriptor chain and making the 2175 * RX desc list much shorter. 2176 */ 2177 if (! sc->sc_kickpcu) 2178 sc->sc_rxlink = NULL; 2179 sc->sc_kickpcu = 1; 2180 ATH_PCU_UNLOCK(sc); 2181 } 2182 /* 2183 * Enqueue an RX proc to handle whatever 2184 * is in the RX queue. 2185 * This will then kick the PCU if required. 2186 */ 2187 sc->sc_rx.recv_sched(sc, 1); 2188 } 2189 if (status & HAL_INT_TXURN) { 2190 sc->sc_stats.ast_txurn++; 2191 /* bump tx trigger level */ 2192 ath_hal_updatetxtriglevel(ah, AH_TRUE); 2193 } 2194 /* 2195 * Handle both the legacy and RX EDMA interrupt bits. 2196 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. 2197 */ 2198 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { 2199 sc->sc_stats.ast_rx_intr++; 2200 sc->sc_rx.recv_sched(sc, 1); 2201 } 2202 if (status & HAL_INT_TX) { 2203 sc->sc_stats.ast_tx_intr++; 2204 /* 2205 * Grab all the currently set bits in the HAL txq bitmap 2206 * and blank them. This is the only place we should be 2207 * doing this. 2208 */ 2209 if (! 
sc->sc_isedma) { 2210 ATH_PCU_LOCK(sc); 2211 txqs = 0xffffffff; 2212 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); 2213 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, 2214 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", 2215 txqs, 2216 sc->sc_txq_active, 2217 sc->sc_txq_active | txqs); 2218 sc->sc_txq_active |= txqs; 2219 ATH_PCU_UNLOCK(sc); 2220 } 2221 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); 2222 } 2223 if (status & HAL_INT_BMISS) { 2224 sc->sc_stats.ast_bmiss++; 2225 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); 2226 } 2227 if (status & HAL_INT_GTT) 2228 sc->sc_stats.ast_tx_timeout++; 2229 if (status & HAL_INT_CST) 2230 sc->sc_stats.ast_tx_cst++; 2231 if (status & HAL_INT_MIB) { 2232 sc->sc_stats.ast_mib++; 2233 ATH_PCU_LOCK(sc); 2234 /* 2235 * Disable interrupts until we service the MIB 2236 * interrupt; otherwise it will continue to fire. 2237 */ 2238 ath_hal_intrset(ah, 0); 2239 /* 2240 * Let the hal handle the event. We assume it will 2241 * clear whatever condition caused the interrupt. 2242 */ 2243 ath_hal_mibevent(ah, &sc->sc_halstats); 2244 /* 2245 * Don't reset the interrupt if we've just 2246 * kicked the PCU, or we may get a nested 2247 * RXEOL before the rxproc has had a chance 2248 * to run. 2249 */ 2250 if (sc->sc_kickpcu == 0) 2251 ath_hal_intrset(ah, sc->sc_imask); 2252 ATH_PCU_UNLOCK(sc); 2253 } 2254 if (status & HAL_INT_RXORN) { 2255 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ 2256 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); 2257 sc->sc_stats.ast_rxorn++; 2258 } 2259 if (status & HAL_INT_TSFOOR) { 2260 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); 2261 sc->sc_syncbeacon = 1; 2262 } 2263 if (status & HAL_INT_MCI) { 2264 ath_btcoex_mci_intr(sc); 2265 } 2266 } 2267 ATH_PCU_LOCK(sc); 2268 sc->sc_intr_cnt--; 2269 ATH_PCU_UNLOCK(sc); 2270 2271 ATH_LOCK(sc); 2272 ath_power_restore_power_state(sc); 2273 ATH_UNLOCK(sc); 2274} 2275 2276static void 2277ath_fatal_proc(void *arg, int pending) 2278{ 2279 struct ath_softc *sc = arg; 2280 u_int32_t *state; 2281 u_int32_t len; 2282 void *sp; 2283 2284 if (sc->sc_invalid) 2285 return; 2286 2287 device_printf(sc->sc_dev, "hardware error; resetting\n"); 2288 /* 2289 * Fatal errors are unrecoverable. Typically these 2290 * are caused by DMA errors. Collect h/w state from 2291 * the hal so we can diagnose what's going on. 2292 */ 2293 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { 2294 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); 2295 state = sp; 2296 device_printf(sc->sc_dev, 2297 "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0], 2298 state[1] , state[2], state[3], state[4], state[5]); 2299 } 2300 ath_reset(sc, ATH_RESET_NOLOSS); 2301} 2302 2303static void 2304ath_bmiss_vap(struct ieee80211vap *vap) 2305{ 2306 struct ath_softc *sc = vap->iv_ic->ic_softc; 2307 2308 /* 2309 * Workaround phantom bmiss interrupts by sanity-checking 2310 * the time of our last rx'd frame. If it is within the 2311 * beacon miss interval then ignore the interrupt. If it's 2312 * truly a bmiss we'll get another interrupt soon and that'll 2313 * be dispatched up for processing. Note this applies only 2314 * for h/w beacon miss events. 2315 */ 2316 2317 /* 2318 * XXX TODO: Just read the TSF during the interrupt path; 2319 * that way we don't have to wake up again just to read it 2320 * again. 
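 *
 * For scale: the TSF ticks in microseconds and ni_intval is in TU
 * (1 TU = 1024us), hence the "* 1024" below. A worked example with
 * a threshold of 10 beacons and a 100 TU beacon interval:
 *
 *	bmisstimeout = 10 * 100 * 1024 = 1024000us (about a second)
 *
 * so any frame rx'd within roughly the last second marks the bmiss
 * as phantom.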
2321 */
2322 ATH_LOCK(sc);
2323 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2324 ATH_UNLOCK(sc);
2325
2326 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
2327 u_int64_t lastrx = sc->sc_lastrx;
2328 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
2329 /* XXX should take a locked ref to iv_bss */
2330 u_int bmisstimeout =
2331 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
2332
2333 DPRINTF(sc, ATH_DEBUG_BEACON,
2334 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
2335 __func__, (unsigned long long) tsf,
2336 (unsigned long long)(tsf - lastrx),
2337 (unsigned long long) lastrx, bmisstimeout);
2338
2339 if (tsf - lastrx <= bmisstimeout) {
2340 sc->sc_stats.ast_bmiss_phantom++;
2341
2342 ATH_LOCK(sc);
2343 ath_power_restore_power_state(sc);
2344 ATH_UNLOCK(sc);
2345
2346 return;
2347 }
2348 }
2349
2350 /*
2351 * There's no need to keep the hardware awake during the call
2352 * to av_bmiss().
2353 */
2354 ATH_LOCK(sc);
2355 ath_power_restore_power_state(sc);
2356 ATH_UNLOCK(sc);
2357
2358 /*
2359 * Attempt to force a beacon resync.
2360 */
2361 sc->sc_syncbeacon = 1;
2362
2363 ATH_VAP(vap)->av_bmiss(vap);
2364}
2365
2366/* XXX this needs a force wakeup! */
2367int
2368ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
2369{
2370 uint32_t rsize;
2371 void *sp;
2372
2373 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
2374 return 0;
2375 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
2376 *hangs = *(uint32_t *)sp;
2377 return 1;
2378}
2379
2380static void
2381ath_bmiss_proc(void *arg, int pending)
2382{
2383 struct ath_softc *sc = arg;
2384 uint32_t hangs;
2385
2386 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
2387
2388 ATH_LOCK(sc);
2389 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2390 ATH_UNLOCK(sc);
2391
2392 ath_beacon_miss(sc);
2393
2394 /*
2395 * Do a reset upon any beacon miss event.
2396 *
2397 * It may be a non-recognised RX clear hang which needs a reset
2398 * to clear.
2399 */
2400 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
2401 ath_reset(sc, ATH_RESET_NOLOSS);
2402 device_printf(sc->sc_dev,
2403 "bb hang detected (0x%x), resetting\n", hangs);
2404 } else {
2405 ath_reset(sc, ATH_RESET_NOLOSS);
2406 ieee80211_beacon_miss(&sc->sc_ic);
2407 }
2408
2409 /* Force a beacon resync, in case they've drifted */
2410 sc->sc_syncbeacon = 1;
2411
2412 ATH_LOCK(sc);
2413 ath_power_restore_power_state(sc);
2414 ATH_UNLOCK(sc);
2415}
2416
2417/*
2418 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
2419 * calcs together with WME. If necessary disable the crypto
2420 * hardware and mark the 802.11 state so keys will be setup
2421 * with the MIC work done in software.
2422 */
2423static void
2424ath_settkipmic(struct ath_softc *sc)
2425{
2426 struct ieee80211com *ic = &sc->sc_ic;
2427
2428 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
2429 if (ic->ic_flags & IEEE80211_F_WME) {
2430 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
2431 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
2432 } else {
2433 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
2434 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
2435 }
2436 }
2437}
2438
2439static int
2440ath_init(struct ath_softc *sc)
2441{
2442 struct ieee80211com *ic = &sc->sc_ic;
2443 struct ath_hal *ah = sc->sc_ah;
2444 HAL_STATUS status;
2445
2446 ATH_LOCK_ASSERT(sc);
2447
2448 /*
2449 * Force the sleep state awake.
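 * Unlike the usual short awake/restore bracket used elsewhere in
 * this file, i.e.
 *
 *	ATH_LOCK(sc);
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	ATH_UNLOCK(sc);
 *	... touch the hardware ...
 *	ATH_LOCK(sc);
 *	ath_power_restore_power_state(sc);
 *	ATH_UNLOCK(sc);
 *
 * init also pins the self-generated frame setting and the target
 * power state via ath_power_setselfgen()/ath_power_setpower(),
 * since it is changing the chip's steady state rather than just
 * taking a temporary wake reference.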
2450 */
2451 ath_power_setselfgen(sc, HAL_PM_AWAKE);
2452 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2453 ath_power_setpower(sc, HAL_PM_AWAKE);
2454
2455 /*
2456 * Stop anything previously setup. This is safe
2457 * whether this is the first time through or not.
2458 */
2459 ath_stop(sc);
2460
2461 /*
2462 * The basic interface to setting the hardware in a good
2463 * state is ``reset''. On return the hardware is known to
2464 * be powered up and with interrupts disabled. This must
2465 * be followed by initialization of the appropriate bits
2466 * and then setup of the interrupt mask.
2467 */
2468 ath_settkipmic(sc);
2469 ath_update_chainmasks(sc, ic->ic_curchan);
2470 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2471 sc->sc_cur_rxchainmask);
2472
2473 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
2474 HAL_RESET_NORMAL, &status)) {
2475 device_printf(sc->sc_dev,
2476 "unable to reset hardware; hal status %u\n", status);
2477 return (ENODEV);
2478 }
2479
2480 ATH_RX_LOCK(sc);
2481 sc->sc_rx_stopped = 1;
2482 sc->sc_rx_resetted = 1;
2483 ATH_RX_UNLOCK(sc);
2484
2485 ath_chan_change(sc, ic->ic_curchan);
2486
2487 /* Let DFS at it in case it's a DFS channel */
2488 ath_dfs_radar_enable(sc, ic->ic_curchan);
2489
2490 /* Let spectral at it in case spectral is enabled */
2491 ath_spectral_enable(sc, ic->ic_curchan);
2492
2493 /*
2494 * Let bluetooth coexistence at it in case it's needed for this channel
2495 */
2496 ath_btcoex_enable(sc, ic->ic_curchan);
2497
2498 /*
2499 * If we're doing TDMA, enforce the TXOP limitation for chips that
2500 * support it.
2501 */
2502 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2503 ath_hal_setenforcetxop(sc->sc_ah, 1);
2504 else
2505 ath_hal_setenforcetxop(sc->sc_ah, 0);
2506
2507 /*
2508 * Likewise this is set during reset so update
2509 * state cached in the driver.
2510 */
2511 sc->sc_diversity = ath_hal_getdiversity(ah);
2512 sc->sc_lastlongcal = ticks;
2513 sc->sc_resetcal = 1;
2514 sc->sc_lastcalreset = 0;
2515 sc->sc_lastani = ticks;
2516 sc->sc_lastshortcal = ticks;
2517 sc->sc_doresetcal = AH_FALSE;
2518 /*
2519 * Beacon timers were cleared here; give ath_newstate()
2520 * a hint that the beacon timers should be poked when
2521 * things transition to the RUN state.
2522 */
2523 sc->sc_beacons = 0;
2524
2525 /*
2526 * Setup the hardware after reset: the key cache
2527 * is filled as needed and the receive engine is
2528 * set going. Frame transmit is handled entirely
2529 * in the frame output path; there's nothing to do
2530 * here except setup the interrupt mask.
2531 */
2532 if (ath_startrecv(sc) != 0) {
2533 device_printf(sc->sc_dev, "unable to start recv logic\n");
2534 ath_power_restore_power_state(sc);
2535 return (ENODEV);
2536 }
2537
2538 /*
2539 * Enable interrupts.
2540 */
2541 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
2542 | HAL_INT_RXORN | HAL_INT_TXURN
2543 | HAL_INT_FATAL | HAL_INT_GLOBAL;
2544
2545 /*
2546 * Enable RX EDMA bits. Note these overlap with
2547 * HAL_INT_RX and HAL_INT_RXDESC respectively.
2548 */
2549 if (sc->sc_isedma)
2550 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
2551
2552 /*
2553 * If we're an EDMA NIC, we don't care about RXEOL.
2554 * Writing a new descriptor in will simply restart
2555 * RX DMA.
2556 */
2557 if (! sc->sc_isedma)
2558 sc->sc_imask |= HAL_INT_RXEOL;
2559
2560 /*
2561 * Enable MCI interrupt for MCI devices.
2562 */
2563 if (sc->sc_btcoex_mci)
2564 sc->sc_imask |= HAL_INT_MCI;
2565
2566 /*
2567 * Enable MIB interrupts when there are hardware phy counters.
2568 * Note we only do this (at the moment) for station mode. 2569 */ 2570 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) 2571 sc->sc_imask |= HAL_INT_MIB; 2572 2573 /* 2574 * XXX add capability for this. 2575 * 2576 * If we're in STA mode (and maybe IBSS?) then register for 2577 * TSFOOR interrupts. 2578 */ 2579 if (ic->ic_opmode == IEEE80211_M_STA) 2580 sc->sc_imask |= HAL_INT_TSFOOR; 2581 2582 /* Enable global TX timeout and carrier sense timeout if available */ 2583 if (ath_hal_gtxto_supported(ah)) 2584 sc->sc_imask |= HAL_INT_GTT; 2585 2586 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", 2587 __func__, sc->sc_imask); 2588 2589 sc->sc_running = 1; 2590 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); 2591 ath_hal_intrset(ah, sc->sc_imask); 2592 2593 ath_power_restore_power_state(sc); 2594 2595 return (0); 2596} 2597 2598static void 2599ath_stop(struct ath_softc *sc) 2600{ 2601 struct ath_hal *ah = sc->sc_ah; 2602 2603 ATH_LOCK_ASSERT(sc); 2604 2605 /* 2606 * Wake the hardware up before fiddling with it. 2607 */ 2608 ath_power_set_power_state(sc, HAL_PM_AWAKE); 2609 2610 if (sc->sc_running) { 2611 /* 2612 * Shutdown the hardware and driver: 2613 * reset 802.11 state machine 2614 * turn off timers 2615 * disable interrupts 2616 * turn off the radio 2617 * clear transmit machinery 2618 * clear receive machinery 2619 * drain and release tx queues 2620 * reclaim beacon resources 2621 * power down hardware 2622 * 2623 * Note that some of this work is not possible if the 2624 * hardware is gone (invalid). 2625 */ 2626#ifdef ATH_TX99_DIAG 2627 if (sc->sc_tx99 != NULL) 2628 sc->sc_tx99->stop(sc->sc_tx99); 2629#endif 2630 callout_stop(&sc->sc_wd_ch); 2631 sc->sc_wd_timer = 0; 2632 sc->sc_running = 0; 2633 if (!sc->sc_invalid) { 2634 if (sc->sc_softled) { 2635 callout_stop(&sc->sc_ledtimer); 2636 ath_hal_gpioset(ah, sc->sc_ledpin, 2637 !sc->sc_ledon); 2638 sc->sc_blinking = 0; 2639 } 2640 ath_hal_intrset(ah, 0); 2641 } 2642 /* XXX we should stop RX regardless of whether it's valid */ 2643 if (!sc->sc_invalid) { 2644 ath_stoprecv(sc, 1); 2645 ath_hal_phydisable(ah); 2646 } else 2647 sc->sc_rxlink = NULL; 2648 ath_draintxq(sc, ATH_RESET_DEFAULT); 2649 ath_beacon_free(sc); /* XXX not needed */ 2650 } 2651 2652 /* And now, restore the current power state */ 2653 ath_power_restore_power_state(sc); 2654} 2655 2656/* 2657 * Wait until all pending TX/RX has completed. 2658 * 2659 * This waits until all existing transmit, receive and interrupts 2660 * have completed. It's assumed that the caller has first 2661 * grabbed the reset lock so it doesn't try to do overlapping 2662 * chip resets. 2663 */ 2664#define MAX_TXRX_ITERATIONS 100 2665static void 2666ath_txrx_stop_locked(struct ath_softc *sc) 2667{ 2668 int i = MAX_TXRX_ITERATIONS; 2669 2670 ATH_UNLOCK_ASSERT(sc); 2671 ATH_PCU_LOCK_ASSERT(sc); 2672 2673 /* 2674 * Sleep until all the pending operations have completed. 2675 * 2676 * The caller must ensure that reset has been incremented 2677 * or the pending operations may continue being queued. 
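 *
 * i.e. the expected calling sequence looks like the one in
 * ath_reset() below:
 *
 *	ATH_PCU_LOCK(sc);
 *	ath_reset_grablock(sc, 1);	(bumps sc_inreset_cnt)
 *	ath_hal_intrset(ah, 0);		(no new interrupts)
 *	ath_txrx_stop_locked(sc);	(wait for pending work)
 *	ATH_PCU_UNLOCK(sc);
 *
 * With sc_inreset_cnt raised the TX/RX/interrupt paths refuse to
 * start new work, so the counters being waited on can only fall.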
2678 */
2679 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2680 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2681 if (i <= 0)
2682 break;
2683 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2684 msecs_to_ticks(10));
2685 i--;
2686 }
2687
2688 if (i <= 0)
2689 device_printf(sc->sc_dev,
2690 "%s: didn't finish after %d iterations\n",
2691 __func__, MAX_TXRX_ITERATIONS);
2692}
2693#undef MAX_TXRX_ITERATIONS
2694
2695#if 0
2696static void
2697ath_txrx_stop(struct ath_softc *sc)
2698{
2699 ATH_UNLOCK_ASSERT(sc);
2700 ATH_PCU_UNLOCK_ASSERT(sc);
2701
2702 ATH_PCU_LOCK(sc);
2703 ath_txrx_stop_locked(sc);
2704 ATH_PCU_UNLOCK(sc);
2705}
2706#endif
2707
2708static void
2709ath_txrx_start(struct ath_softc *sc)
2710{
2711
2712 taskqueue_unblock(sc->sc_tq);
2713}
2714
2715/*
2716 * Grab the reset lock, and wait around until no one else
2717 * is trying to do anything with it.
2718 *
2719 * This is totally horrible but we can't hold this lock for
2720 * long enough to do TX/RX or we end up with net80211/ip stack
2721 * LORs and eventual deadlock.
2722 *
2723 * "dowait" signals whether to spin, waiting for the reset
2724 * lock count to reach 0. This should (for now) only be used
2725 * during the reset path, as the rest of the code may not
2726 * be locking-reentrant enough to behave correctly.
2727 *
2728 * Another, cleaner way should be found to serialise all of
2729 * these operations.
2730 */
2731#define MAX_RESET_ITERATIONS 25
2732static int
2733ath_reset_grablock(struct ath_softc *sc, int dowait)
2734{
2735 int w = 0;
2736 int i = MAX_RESET_ITERATIONS;
2737
2738 ATH_PCU_LOCK_ASSERT(sc);
2739 do {
2740 if (sc->sc_inreset_cnt == 0) {
2741 w = 1;
2742 break;
2743 }
2744 if (dowait == 0) {
2745 w = 0;
2746 break;
2747 }
2748 ATH_PCU_UNLOCK(sc);
2749 /*
2750 * 1 tick is likely not enough time for long calibrations
2751 * to complete. So we should wait quite a while.
2752 */
2753 pause("ath_reset_grablock", msecs_to_ticks(100));
2754 i--;
2755 ATH_PCU_LOCK(sc);
2756 } while (i > 0);
2757
2758 /*
2759 * We always increment the refcounter, regardless
2760 * of whether we succeeded in getting it in an exclusive
2761 * way.
2762 */
2763 sc->sc_inreset_cnt++;
2764
2765 if (i <= 0)
2766 device_printf(sc->sc_dev,
2767 "%s: didn't finish after %d iterations\n",
2768 __func__, MAX_RESET_ITERATIONS);
2769
2770 if (w == 0)
2771 device_printf(sc->sc_dev,
2772 "%s: warning, recursive reset path!\n",
2773 __func__);
2774
2775 return w;
2776}
2777#undef MAX_RESET_ITERATIONS
2778
2779/*
2780 * Reset the hardware w/o losing operational state. This is
2781 * basically a more efficient way of doing ath_stop, ath_init,
2782 * followed by state transitions to the current 802.11
2783 * operational state. Used to recover from various errors and
2784 * to reset or reload hardware state.
2785 */
2786int
2787ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
2788{
2789 struct ieee80211com *ic = &sc->sc_ic;
2790 struct ath_hal *ah = sc->sc_ah;
2791 HAL_STATUS status;
2792 int i;
2793
2794 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2795
2796 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2797 ATH_PCU_UNLOCK_ASSERT(sc);
2798 ATH_UNLOCK_ASSERT(sc);
2799
2800 /* Try to stop any further TX/RX from occurring */
2801 taskqueue_block(sc->sc_tq);
2802
2803 /*
2804 * Wake the hardware up.
2805 */
2806 ATH_LOCK(sc);
2807 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2808 ATH_UNLOCK(sc);
2809
2810 ATH_PCU_LOCK(sc);
2811
2812 /*
2813 * Grab the reset lock before TX/RX is stopped.
2814 * 2815 * This is needed to ensure that when the TX/RX actually does finish, 2816 * no further TX/RX/reset runs in parallel with this. 2817 */ 2818 if (ath_reset_grablock(sc, 1) == 0) { 2819 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", 2820 __func__); 2821 } 2822 2823 /* disable interrupts */ 2824 ath_hal_intrset(ah, 0); 2825 2826 /* 2827 * Now, ensure that any in progress TX/RX completes before we 2828 * continue. 2829 */ 2830 ath_txrx_stop_locked(sc); 2831 2832 ATH_PCU_UNLOCK(sc); 2833 2834 /* 2835 * Regardless of whether we're doing a no-loss flush or 2836 * not, stop the PCU and handle what's in the RX queue. 2837 * That way frames aren't dropped which shouldn't be. 2838 */ 2839 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); 2840 ath_rx_flush(sc); 2841 2842 /* 2843 * Should now wait for pending TX/RX to complete 2844 * and block future ones from occurring. This needs to be 2845 * done before the TX queue is drained. 2846 */ 2847 ath_draintxq(sc, reset_type); /* stop xmit side */ 2848 2849 ath_settkipmic(sc); /* configure TKIP MIC handling */ 2850 /* NB: indicate channel change so we do a full reset */ 2851 ath_update_chainmasks(sc, ic->ic_curchan); 2852 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, 2853 sc->sc_cur_rxchainmask); 2854 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, 2855 HAL_RESET_NORMAL, &status)) 2856 device_printf(sc->sc_dev, 2857 "%s: unable to reset hardware; hal status %u\n", 2858 __func__, status); 2859 sc->sc_diversity = ath_hal_getdiversity(ah); 2860 2861 ATH_RX_LOCK(sc); 2862 sc->sc_rx_stopped = 1; 2863 sc->sc_rx_resetted = 1; 2864 ATH_RX_UNLOCK(sc); 2865 2866 /* Let DFS at it in case it's a DFS channel */ 2867 ath_dfs_radar_enable(sc, ic->ic_curchan); 2868 2869 /* Let spectral at in case spectral is enabled */ 2870 ath_spectral_enable(sc, ic->ic_curchan); 2871 2872 /* 2873 * Let bluetooth coexistence at in case it's needed for this channel 2874 */ 2875 ath_btcoex_enable(sc, ic->ic_curchan); 2876 2877 /* 2878 * If we're doing TDMA, enforce the TXOP limitation for chips that 2879 * support it. 2880 */ 2881 if (sc->sc_hasenforcetxop && sc->sc_tdma) 2882 ath_hal_setenforcetxop(sc->sc_ah, 1); 2883 else 2884 ath_hal_setenforcetxop(sc->sc_ah, 0); 2885 2886 if (ath_startrecv(sc) != 0) /* restart recv */ 2887 device_printf(sc->sc_dev, 2888 "%s: unable to start recv logic\n", __func__); 2889 /* 2890 * We may be doing a reset in response to an ioctl 2891 * that changes the channel so update any state that 2892 * might change as a result. 2893 */ 2894 ath_chan_change(sc, ic->ic_curchan); 2895 if (sc->sc_beacons) { /* restart beacons */ 2896#ifdef IEEE80211_SUPPORT_TDMA 2897 if (sc->sc_tdma) 2898 ath_tdma_config(sc, NULL); 2899 else 2900#endif 2901 ath_beacon_config(sc, NULL); 2902 } 2903 2904 /* 2905 * Release the reset lock and re-enable interrupts here. 2906 * If an interrupt was being processed in ath_intr(), 2907 * it would disable interrupts at this point. So we have 2908 * to atomically enable interrupts and decrement the 2909 * reset counter - this way ath_intr() doesn't end up 2910 * disabling interrupts without a corresponding enable 2911 * in the rest or channel change path. 2912 * 2913 * Grab the TX reference in case we need to transmit. 2914 * That way a parallel transmit doesn't. 2915 */ 2916 ATH_PCU_LOCK(sc); 2917 sc->sc_inreset_cnt--; 2918 sc->sc_txstart_cnt++; 2919 /* XXX only do this if sc_inreset_cnt == 0? 
*/
2920 ath_hal_intrset(ah, sc->sc_imask);
2921 ATH_PCU_UNLOCK(sc);
2922
2923 /*
2924 * TX and RX can be started here. If it were started with
2925 * sc_inreset_cnt > 0, the TX and RX path would abort.
2926 * Thus if this is a nested call through the reset or
2927 * channel change code, TX completion will occur but
2928 * RX completion and ath_start / ath_tx_start will not
2929 * run.
2930 */
2931
2932 /* Restart TX/RX as needed */
2933 ath_txrx_start(sc);
2934
2935 /* XXX TODO: we need to hold the tx refcount here! */
2936
2937 /* Restart TX completion and pending TX */
2938 if (reset_type == ATH_RESET_NOLOSS) {
2939 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2940 if (ATH_TXQ_SETUP(sc, i)) {
2941 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2942 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2943 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2944
2945 ATH_TX_LOCK(sc);
2946 ath_txq_sched(sc, &sc->sc_txq[i]);
2947 ATH_TX_UNLOCK(sc);
2948 }
2949 }
2950 }
2951
2952 ATH_LOCK(sc);
2953 ath_power_restore_power_state(sc);
2954 ATH_UNLOCK(sc);
2955
2956 ATH_PCU_LOCK(sc);
2957 sc->sc_txstart_cnt--;
2958 ATH_PCU_UNLOCK(sc);
2959
2960 /* Handle any frames in the TX queue */
2961 /*
2962 * XXX should this be done by the caller, rather than
2963 * ath_reset() ?
2964 */
2965 ath_tx_kick(sc); /* restart xmit */
2966 return 0;
2967}
2968
2969static int
2970ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2971{
2972 struct ieee80211com *ic = vap->iv_ic;
2973 struct ath_softc *sc = ic->ic_softc;
2974 struct ath_hal *ah = sc->sc_ah;
2975
2976 switch (cmd) {
2977 case IEEE80211_IOC_TXPOWER:
2978 /*
2979 * If per-packet TPC is enabled, then we have nothing
2980 * to do; otherwise we need to force the global limit.
2981 * All this can happen directly; no need to reset.
2982 */
2983 if (!ath_hal_gettpc(ah))
2984 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2985 return 0;
2986 }
2987 /* XXX? Full or NOLOSS? */
2988 return ath_reset(sc, ATH_RESET_FULL);
2989}
2990
2991struct ath_buf *
2992_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2993{
2994 struct ath_buf *bf;
2995
2996 ATH_TXBUF_LOCK_ASSERT(sc);
2997
2998 if (btype == ATH_BUFTYPE_MGMT)
2999 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
3000 else
3001 bf = TAILQ_FIRST(&sc->sc_txbuf);
3002
3003 if (bf == NULL) {
3004 sc->sc_stats.ast_tx_getnobuf++;
3005 } else {
3006 if (bf->bf_flags & ATH_BUF_BUSY) {
3007 sc->sc_stats.ast_tx_getbusybuf++;
3008 bf = NULL;
3009 }
3010 }
3011
3012 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
3013 if (btype == ATH_BUFTYPE_MGMT)
3014 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
3015 else {
3016 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
3017 sc->sc_txbuf_cnt--;
3018
3019 /*
3020 * This shouldn't happen; however, just to be
3021 * safe, print a warning and fudge the txbuf
3022 * count.
3023 */
3024 if (sc->sc_txbuf_cnt < 0) {
3025 device_printf(sc->sc_dev,
3026 "%s: sc_txbuf_cnt < 0?\n",
3027 __func__);
3028 sc->sc_txbuf_cnt = 0;
3029 }
3030 }
3031 } else
3032 bf = NULL;
3033
3034 if (bf == NULL) {
3035 /* XXX should check which list, mgmt or otherwise */
3036 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
3037 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
3038 "out of xmit buffers" : "xmit buffer busy"); 3039 return NULL; 3040 } 3041 3042 /* XXX TODO: should do this at buffer list initialisation */ 3043 /* XXX (then, ensure the buffer has the right flag set) */ 3044 bf->bf_flags = 0; 3045 if (btype == ATH_BUFTYPE_MGMT) 3046 bf->bf_flags |= ATH_BUF_MGMT; 3047 else 3048 bf->bf_flags &= (~ATH_BUF_MGMT); 3049 3050 /* Valid bf here; clear some basic fields */ 3051 bf->bf_next = NULL; /* XXX just to be sure */ 3052 bf->bf_last = NULL; /* XXX again, just to be sure */ 3053 bf->bf_comp = NULL; /* XXX again, just to be sure */ 3054 bzero(&bf->bf_state, sizeof(bf->bf_state)); 3055 3056 /* 3057 * Track the descriptor ID only if doing EDMA 3058 */ 3059 if (sc->sc_isedma) { 3060 bf->bf_descid = sc->sc_txbuf_descid; 3061 sc->sc_txbuf_descid++; 3062 } 3063 3064 return bf; 3065} 3066 3067/* 3068 * When retrying a software frame, buffers marked ATH_BUF_BUSY 3069 * can't be thrown back on the queue as they could still be 3070 * in use by the hardware. 3071 * 3072 * This duplicates the buffer, or returns NULL. 3073 * 3074 * The descriptor is also copied but the link pointers and 3075 * the DMA segments aren't copied; this frame should thus 3076 * be again passed through the descriptor setup/chain routines 3077 * so the link is correct. 3078 * 3079 * The caller must free the buffer using ath_freebuf(). 3080 */ 3081struct ath_buf * 3082ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) 3083{ 3084 struct ath_buf *tbf; 3085 3086 tbf = ath_getbuf(sc, 3087 (bf->bf_flags & ATH_BUF_MGMT) ? 3088 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); 3089 if (tbf == NULL) 3090 return NULL; /* XXX failure? Why? */ 3091 3092 /* Copy basics */ 3093 tbf->bf_next = NULL; 3094 tbf->bf_nseg = bf->bf_nseg; 3095 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; 3096 tbf->bf_status = bf->bf_status; 3097 tbf->bf_m = bf->bf_m; 3098 tbf->bf_node = bf->bf_node; 3099 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); 3100 /* will be setup by the chain/setup function */ 3101 tbf->bf_lastds = NULL; 3102 /* for now, last == self */ 3103 tbf->bf_last = tbf; 3104 tbf->bf_comp = bf->bf_comp; 3105 3106 /* NOTE: DMA segments will be setup by the setup/chain functions */ 3107 3108 /* The caller has to re-init the descriptor + links */ 3109 3110 /* 3111 * Free the DMA mapping here, before we NULL the mbuf. 3112 * We must only call bus_dmamap_unload() once per mbuf chain 3113 * or behaviour is undefined. 3114 */ 3115 if (bf->bf_m != NULL) { 3116 /* 3117 * XXX is this POSTWRITE call required? 3118 */ 3119 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 3120 BUS_DMASYNC_POSTWRITE); 3121 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 3122 } 3123 3124 bf->bf_m = NULL; 3125 bf->bf_node = NULL; 3126 3127 /* Copy state */ 3128 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); 3129 3130 return tbf; 3131} 3132 3133struct ath_buf * 3134ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) 3135{ 3136 struct ath_buf *bf; 3137 3138 ATH_TXBUF_LOCK(sc); 3139 bf = _ath_getbuf_locked(sc, btype); 3140 /* 3141 * If a mgmt buffer was requested but we're out of those, 3142 * try requesting a normal one. 3143 */ 3144 if (bf == NULL && btype == ATH_BUFTYPE_MGMT) 3145 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); 3146 ATH_TXBUF_UNLOCK(sc); 3147 if (bf == NULL) { 3148 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); 3149 sc->sc_stats.ast_tx_qstop++; 3150 } 3151 return bf; 3152} 3153 3154/* 3155 * Transmit a single frame. 
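 *
 * A rough roadmap of the function below:
 *
 *	1. bump sc_txstart_cnt so a concurrent reset waits for us;
 *	2. enforce the per-node and global buffer limits (EAPOL
 *	   frames are exempt and draw from the mgmt pool);
 *	3. for M_FRAG chains, reserve a buffer per fragment up front
 *	   so either every fragment goes out or none do;
 *	4. hand each frame to ath_tx_start();
 *	5. drop sc_txstart_cnt and restore the power state.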
3156 * 3157 * net80211 will free the node reference if the transmit 3158 * fails, so don't free the node reference here. 3159 */ 3160static int 3161ath_transmit(struct ieee80211com *ic, struct mbuf *m) 3162{ 3163 struct ath_softc *sc = ic->ic_softc; 3164 struct ieee80211_node *ni; 3165 struct mbuf *next; 3166 struct ath_buf *bf; 3167 ath_bufhead frags; 3168 int retval = 0; 3169 3170 /* 3171 * Tell the reset path that we're currently transmitting. 3172 */ 3173 ATH_PCU_LOCK(sc); 3174 if (sc->sc_inreset_cnt > 0) { 3175 DPRINTF(sc, ATH_DEBUG_XMIT, 3176 "%s: sc_inreset_cnt > 0; bailing\n", __func__); 3177 ATH_PCU_UNLOCK(sc); 3178 sc->sc_stats.ast_tx_qstop++; 3179 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); 3180 return (ENOBUFS); /* XXX should be EINVAL or? */ 3181 } 3182 sc->sc_txstart_cnt++; 3183 ATH_PCU_UNLOCK(sc); 3184 3185 /* Wake the hardware up already */ 3186 ATH_LOCK(sc); 3187 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3188 ATH_UNLOCK(sc); 3189 3190 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); 3191 /* 3192 * Grab the TX lock - it's ok to do this here; we haven't 3193 * yet started transmitting. 3194 */ 3195 ATH_TX_LOCK(sc); 3196 3197 /* 3198 * Node reference, if there's one. 3199 */ 3200 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 3201 3202 /* 3203 * Enforce how deep a node queue can get. 3204 * 3205 * XXX it would be nicer if we kept an mbuf queue per 3206 * node and only whacked them into ath_bufs when we 3207 * are ready to schedule some traffic from them. 3208 * .. that may come later. 3209 * 3210 * XXX we should also track the per-node hardware queue 3211 * depth so it is easy to limit the _SUM_ of the swq and 3212 * hwq frames. Since we only schedule two HWQ frames 3213 * at a time, this should be OK for now. 3214 */ 3215 if ((!(m->m_flags & M_EAPOL)) && 3216 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { 3217 sc->sc_stats.ast_tx_nodeq_overflow++; 3218 retval = ENOBUFS; 3219 goto finish; 3220 } 3221 3222 /* 3223 * Check how many TX buffers are available. 3224 * 3225 * If this is for non-EAPOL traffic, just leave some 3226 * space free in order for buffer cloning and raw 3227 * frame transmission to occur. 3228 * 3229 * If it's for EAPOL traffic, ignore this for now. 3230 * Management traffic will be sent via the raw transmit 3231 * method which bypasses this check. 3232 * 3233 * This is needed to ensure that EAPOL frames during 3234 * (re) keying have a chance to go out. 3235 * 3236 * See kern/138379 for more information. 3237 */ 3238 if ((!(m->m_flags & M_EAPOL)) && 3239 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { 3240 sc->sc_stats.ast_tx_nobuf++; 3241 retval = ENOBUFS; 3242 goto finish; 3243 } 3244 3245 /* 3246 * Grab a TX buffer and associated resources. 3247 * 3248 * If it's an EAPOL frame, allocate a MGMT ath_buf. 3249 * That way even with temporary buffer exhaustion due to 3250 * the data path doesn't leave us without the ability 3251 * to transmit management frames. 3252 * 3253 * Otherwise allocate a normal buffer. 3254 */ 3255 if (m->m_flags & M_EAPOL) 3256 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); 3257 else 3258 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); 3259 3260 if (bf == NULL) { 3261 /* 3262 * If we failed to allocate a buffer, fail. 3263 * 3264 * We shouldn't fail normally, due to the check 3265 * above. 3266 */ 3267 sc->sc_stats.ast_tx_nobuf++; 3268 retval = ENOBUFS; 3269 goto finish; 3270 } 3271 3272 /* 3273 * At this point we have a buffer; so we need to free it 3274 * if we hit any error conditions. 
3275 */ 3276 3277 /* 3278 * Check for fragmentation. If this frame 3279 * has been broken up verify we have enough 3280 * buffers to send all the fragments so all 3281 * go out or none... 3282 */ 3283 TAILQ_INIT(&frags); 3284 if ((m->m_flags & M_FRAG) && 3285 !ath_txfrag_setup(sc, &frags, m, ni)) { 3286 DPRINTF(sc, ATH_DEBUG_XMIT, 3287 "%s: out of txfrag buffers\n", __func__); 3288 sc->sc_stats.ast_tx_nofrag++; 3289 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3290 /* 3291 * XXXGL: is mbuf valid after ath_txfrag_setup? If yes, 3292 * we shouldn't free it but return back. 3293 */ 3294 ieee80211_free_mbuf(m); 3295 m = NULL; 3296 goto bad; 3297 } 3298 3299 /* 3300 * At this point if we have any TX fragments, then we will 3301 * have bumped the node reference once for each of those. 3302 */ 3303 3304 /* 3305 * XXX Is there anything actually _enforcing_ that the 3306 * fragments are being transmitted in one hit, rather than 3307 * being interleaved with other transmissions on that 3308 * hardware queue? 3309 * 3310 * The ATH TX output lock is the only thing serialising this 3311 * right now. 3312 */ 3313 3314 /* 3315 * Calculate the "next fragment" length field in ath_buf 3316 * in order to let the transmit path know enough about 3317 * what to next write to the hardware. 3318 */ 3319 if (m->m_flags & M_FRAG) { 3320 struct ath_buf *fbf = bf; 3321 struct ath_buf *n_fbf = NULL; 3322 struct mbuf *fm = m->m_nextpkt; 3323 3324 /* 3325 * We need to walk the list of fragments and set 3326 * the next size to the following buffer. 3327 * However, the first buffer isn't in the frag 3328 * list, so we have to do some gymnastics here. 3329 */ 3330 TAILQ_FOREACH(n_fbf, &frags, bf_list) { 3331 fbf->bf_nextfraglen = fm->m_pkthdr.len; 3332 fbf = n_fbf; 3333 fm = fm->m_nextpkt; 3334 } 3335 } 3336 3337nextfrag: 3338 /* 3339 * Pass the frame to the h/w for transmission. 3340 * Fragmented frames have each frag chained together 3341 * with m_nextpkt. We know there are sufficient ath_buf's 3342 * to send all the frags because of work done by 3343 * ath_txfrag_setup. We leave m_nextpkt set while 3344 * calling ath_tx_start so it can use it to extend the 3345 * the tx duration to cover the subsequent frag and 3346 * so it can reclaim all the mbufs in case of an error; 3347 * ath_tx_start clears m_nextpkt once it commits to 3348 * handing the frame to the hardware. 3349 * 3350 * Note: if this fails, then the mbufs are freed but 3351 * not the node reference. 3352 * 3353 * So, we now have to free the node reference ourselves here 3354 * and return OK up to the stack. 3355 */ 3356 next = m->m_nextpkt; 3357 if (ath_tx_start(sc, ni, bf, m)) { 3358bad: 3359 if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); 3360reclaim: 3361 bf->bf_m = NULL; 3362 bf->bf_node = NULL; 3363 ATH_TXBUF_LOCK(sc); 3364 ath_returnbuf_head(sc, bf); 3365 /* 3366 * Free the rest of the node references and 3367 * buffers for the fragment list. 3368 */ 3369 ath_txfrag_cleanup(sc, &frags, ni); 3370 ATH_TXBUF_UNLOCK(sc); 3371 3372 /* 3373 * XXX: And free the node/return OK; ath_tx_start() may have 3374 * modified the buffer. We currently have no way to 3375 * signify that the mbuf was freed but there was an error. 3376 */ 3377 ieee80211_free_node(ni); 3378 retval = 0; 3379 goto finish; 3380 } 3381 3382 /* 3383 * Check here if the node is in power save state. 3384 */ 3385 ath_tx_update_tim(sc, ni, 1); 3386 3387 if (next != NULL) { 3388 /* 3389 * Beware of state changing between frags. 3390 * XXX check sta power-save state? 
3391 */ 3392 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { 3393 DPRINTF(sc, ATH_DEBUG_XMIT, 3394 "%s: flush fragmented packet, state %s\n", 3395 __func__, 3396 ieee80211_state_name[ni->ni_vap->iv_state]); 3397 /* XXX dmamap */ 3398 ieee80211_free_mbuf(next); 3399 goto reclaim; 3400 } 3401 m = next; 3402 bf = TAILQ_FIRST(&frags); 3403 KASSERT(bf != NULL, ("no buf for txfrag")); 3404 TAILQ_REMOVE(&frags, bf, bf_list); 3405 goto nextfrag; 3406 } 3407 3408 /* 3409 * Bump watchdog timer. 3410 */ 3411 sc->sc_wd_timer = 5; 3412 3413finish: 3414 ATH_TX_UNLOCK(sc); 3415 3416 /* 3417 * Finished transmitting! 3418 */ 3419 ATH_PCU_LOCK(sc); 3420 sc->sc_txstart_cnt--; 3421 ATH_PCU_UNLOCK(sc); 3422 3423 /* Sleep the hardware if required */ 3424 ATH_LOCK(sc); 3425 ath_power_restore_power_state(sc); 3426 ATH_UNLOCK(sc); 3427 3428 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); 3429 3430 return (retval); 3431} 3432 3433static int 3434ath_media_change(struct ifnet *ifp) 3435{ 3436 int error = ieee80211_media_change(ifp); 3437 /* NB: only the fixed rate can change and that doesn't need a reset */ 3438 return (error == ENETRESET ? 0 : error); 3439} 3440 3441/* 3442 * Block/unblock tx+rx processing while a key change is done. 3443 * We assume the caller serializes key management operations 3444 * so we only need to worry about synchronization with other 3445 * uses that originate in the driver. 3446 */ 3447static void 3448ath_key_update_begin(struct ieee80211vap *vap) 3449{ 3450 struct ath_softc *sc = vap->iv_ic->ic_softc; 3451 3452 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3453 taskqueue_block(sc->sc_tq); 3454} 3455 3456static void 3457ath_key_update_end(struct ieee80211vap *vap) 3458{ 3459 struct ath_softc *sc = vap->iv_ic->ic_softc; 3460 3461 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); 3462 taskqueue_unblock(sc->sc_tq); 3463} 3464 3465static void 3466ath_update_promisc(struct ieee80211com *ic) 3467{ 3468 struct ath_softc *sc = ic->ic_softc; 3469 u_int32_t rfilt; 3470 3471 /* configure rx filter */ 3472 ATH_LOCK(sc); 3473 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3474 rfilt = ath_calcrxfilter(sc); 3475 ath_hal_setrxfilter(sc->sc_ah, rfilt); 3476 ath_power_restore_power_state(sc); 3477 ATH_UNLOCK(sc); 3478 3479 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); 3480} 3481 3482/* 3483 * Driver-internal mcast update call. 3484 * 3485 * Assumes the hardware is already awake. 3486 */ 3487static void 3488ath_update_mcast_hw(struct ath_softc *sc) 3489{ 3490 struct ieee80211com *ic = &sc->sc_ic; 3491 u_int32_t mfilt[2]; 3492 3493 /* calculate and install multicast filter */ 3494 if (ic->ic_allmulti == 0) { 3495 struct ieee80211vap *vap; 3496 struct ifnet *ifp; 3497 struct ifmultiaddr *ifma; 3498 3499 /* 3500 * Merge multicast addresses to form the hardware filter. 
3501 */ 3502 mfilt[0] = mfilt[1] = 0; 3503 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 3504 ifp = vap->iv_ifp; 3505 if_maddr_rlock(ifp); 3506 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3507 caddr_t dl; 3508 uint32_t val; 3509 uint8_t pos; 3510 3511 /* calculate XOR of eight 6bit values */ 3512 dl = LLADDR((struct sockaddr_dl *) 3513 ifma->ifma_addr); 3514 val = le32dec(dl + 0); 3515 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ 3516 val; 3517 val = le32dec(dl + 3); 3518 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ 3519 val; 3520 pos &= 0x3f; 3521 mfilt[pos / 32] |= (1 << (pos % 32)); 3522 } 3523 if_maddr_runlock(ifp); 3524 } 3525 } else 3526 mfilt[0] = mfilt[1] = ~0; 3527 3528 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); 3529 3530 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", 3531 __func__, mfilt[0], mfilt[1]); 3532} 3533 3534/* 3535 * Called from the net80211 layer - force the hardware 3536 * awake before operating. 3537 */ 3538static void 3539ath_update_mcast(struct ieee80211com *ic) 3540{ 3541 struct ath_softc *sc = ic->ic_softc; 3542 3543 ATH_LOCK(sc); 3544 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3545 ATH_UNLOCK(sc); 3546 3547 ath_update_mcast_hw(sc); 3548 3549 ATH_LOCK(sc); 3550 ath_power_restore_power_state(sc); 3551 ATH_UNLOCK(sc); 3552} 3553 3554void 3555ath_mode_init(struct ath_softc *sc) 3556{ 3557 struct ieee80211com *ic = &sc->sc_ic; 3558 struct ath_hal *ah = sc->sc_ah; 3559 u_int32_t rfilt; 3560 3561 /* configure rx filter */ 3562 rfilt = ath_calcrxfilter(sc); 3563 ath_hal_setrxfilter(ah, rfilt); 3564 3565 /* configure operational mode */ 3566 ath_hal_setopmode(ah); 3567 3568 /* handle any link-level address change */ 3569 ath_hal_setmac(ah, ic->ic_macaddr); 3570 3571 /* calculate and install multicast filter */ 3572 ath_update_mcast_hw(sc); 3573} 3574 3575/* 3576 * Set the slot time based on the current setting. 3577 */ 3578void 3579ath_setslottime(struct ath_softc *sc) 3580{ 3581 struct ieee80211com *ic = &sc->sc_ic; 3582 struct ath_hal *ah = sc->sc_ah; 3583 u_int usec; 3584 3585 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) 3586 usec = 13; 3587 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) 3588 usec = 21; 3589 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { 3590 /* honor short/long slot time only in 11g */ 3591 /* XXX shouldn't honor on pure g or turbo g channel */ 3592 if (ic->ic_flags & IEEE80211_F_SHSLOT) 3593 usec = HAL_SLOT_TIME_9; 3594 else 3595 usec = HAL_SLOT_TIME_20; 3596 } else 3597 usec = HAL_SLOT_TIME_9; 3598 3599 DPRINTF(sc, ATH_DEBUG_RESET, 3600 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", 3601 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, 3602 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); 3603 3604 /* Wake up the hardware first before updating the slot time */ 3605 ATH_LOCK(sc); 3606 ath_power_set_power_state(sc, HAL_PM_AWAKE); 3607 ath_hal_setslottime(ah, usec); 3608 ath_power_restore_power_state(sc); 3609 sc->sc_updateslot = OK; 3610 ATH_UNLOCK(sc); 3611} 3612 3613/* 3614 * Callback from the 802.11 layer to update the 3615 * slot time based on the current setting. 3616 */ 3617static void 3618ath_updateslot(struct ieee80211com *ic) 3619{ 3620 struct ath_softc *sc = ic->ic_softc; 3621 3622 /* 3623 * When not coordinating the BSS, change the hardware 3624 * immediately. For other operation we defer the change 3625 * until beacon updates have propagated to the stations. 3626 * 3627 * XXX sc_updateslot isn't changed behind a lock? 
3628 */ 3629 if (ic->ic_opmode == IEEE80211_M_HOSTAP || 3630 ic->ic_opmode == IEEE80211_M_MBSS) 3631 sc->sc_updateslot = UPDATE; 3632 else 3633 ath_setslottime(sc); 3634} 3635 3636/* 3637 * Append the contents of src to dst; both queues 3638 * are assumed to be locked. 3639 */ 3640void 3641ath_txqmove(struct ath_txq *dst, struct ath_txq *src) 3642{ 3643 3644 ATH_TXQ_LOCK_ASSERT(src); 3645 ATH_TXQ_LOCK_ASSERT(dst); 3646 3647 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); 3648 dst->axq_link = src->axq_link; 3649 src->axq_link = NULL; 3650 dst->axq_depth += src->axq_depth; 3651 dst->axq_aggr_depth += src->axq_aggr_depth; 3652 src->axq_depth = 0; 3653 src->axq_aggr_depth = 0; 3654} 3655 3656/* 3657 * Reset the hardware, with no loss. 3658 * 3659 * This can't be used for a general case reset. 3660 */ 3661static void 3662ath_reset_proc(void *arg, int pending) 3663{ 3664 struct ath_softc *sc = arg; 3665 3666#if 0 3667 device_printf(sc->sc_dev, "%s: resetting\n", __func__); 3668#endif 3669 ath_reset(sc, ATH_RESET_NOLOSS); 3670} 3671 3672/* 3673 * Reset the hardware after detecting beacons have stopped. 3674 */ 3675static void 3676ath_bstuck_proc(void *arg, int pending) 3677{ 3678 struct ath_softc *sc = arg; 3679 uint32_t hangs = 0; 3680 3681 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) 3682 device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs); 3683 3684#ifdef ATH_DEBUG_ALQ 3685 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) 3686 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); 3687#endif 3688 3689 device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n", 3690 sc->sc_bmisscount); 3691 sc->sc_stats.ast_bstuck++; 3692 /* 3693 * This assumes that there's no simultaneous channel mode change 3694 * occurring. 3695 */ 3696 ath_reset(sc, ATH_RESET_NOLOSS); 3697} 3698 3699static int 3700ath_desc_alloc(struct ath_softc *sc) 3701{ 3702 int error; 3703 3704 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 3705 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); 3706 if (error != 0) { 3707 return error; 3708 } 3709 sc->sc_txbuf_cnt = ath_txbuf; 3710 3711 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, 3712 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, 3713 ATH_TXDESC); 3714 if (error != 0) { 3715 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3716 return error; 3717 } 3718 3719 /* 3720 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the 3721 * flag doesn't have to be set in ath_getbuf_locked(). 
3722 */ 3723 3724 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 3725 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); 3726 if (error != 0) { 3727 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3728 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3729 &sc->sc_txbuf_mgmt); 3730 return error; 3731 } 3732 return 0; 3733} 3734 3735static void 3736ath_desc_free(struct ath_softc *sc) 3737{ 3738 3739 if (sc->sc_bdma.dd_desc_len != 0) 3740 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 3741 if (sc->sc_txdma.dd_desc_len != 0) 3742 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 3743 if (sc->sc_txdma_mgmt.dd_desc_len != 0) 3744 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, 3745 &sc->sc_txbuf_mgmt); 3746} 3747 3748static struct ieee80211_node * 3749ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 3750{ 3751 struct ieee80211com *ic = vap->iv_ic; 3752 struct ath_softc *sc = ic->ic_softc; 3753 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; 3754 struct ath_node *an; 3755 3756 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); 3757 if (an == NULL) { 3758 /* XXX stat+msg */ 3759 return NULL; 3760 } 3761 ath_rate_node_init(sc, an); 3762 3763 /* Setup the mutex - there's no associd yet so set the name to NULL */ 3764 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", 3765 device_get_nameunit(sc->sc_dev), an); 3766 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); 3767 3768 /* XXX setup ath_tid */ 3769 ath_tx_tid_init(sc, an); 3770 3771 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); 3772 return &an->an_node; 3773} 3774 3775static void 3776ath_node_cleanup(struct ieee80211_node *ni) 3777{ 3778 struct ieee80211com *ic = ni->ni_ic; 3779 struct ath_softc *sc = ic->ic_softc; 3780 3781 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3782 ni->ni_macaddr, ":", ATH_NODE(ni)); 3783 3784 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ 3785 ath_tx_node_flush(sc, ATH_NODE(ni)); 3786 ath_rate_node_cleanup(sc, ATH_NODE(ni)); 3787 sc->sc_node_cleanup(ni); 3788} 3789 3790static void 3791ath_node_free(struct ieee80211_node *ni) 3792{ 3793 struct ieee80211com *ic = ni->ni_ic; 3794 struct ath_softc *sc = ic->ic_softc; 3795 3796 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, 3797 ni->ni_macaddr, ":", ATH_NODE(ni)); 3798 mtx_destroy(&ATH_NODE(ni)->an_mtx); 3799 sc->sc_node_free(ni); 3800} 3801 3802static void 3803ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) 3804{ 3805 struct ieee80211com *ic = ni->ni_ic; 3806 struct ath_softc *sc = ic->ic_softc; 3807 struct ath_hal *ah = sc->sc_ah; 3808 3809 *rssi = ic->ic_node_getrssi(ni); 3810 if (ni->ni_chan != IEEE80211_CHAN_ANYC) 3811 *noise = ath_hal_getchannoise(ah, ni->ni_chan); 3812 else 3813 *noise = -95; /* nominally correct */ 3814} 3815 3816/* 3817 * Set the default antenna. 
3818 */ 3819void 3820ath_setdefantenna(struct ath_softc *sc, u_int antenna) 3821{ 3822 struct ath_hal *ah = sc->sc_ah; 3823 3824 /* XXX block beacon interrupts */ 3825 ath_hal_setdefantenna(ah, antenna); 3826 if (sc->sc_defant != antenna) 3827 sc->sc_stats.ast_ant_defswitch++; 3828 sc->sc_defant = antenna; 3829 sc->sc_rxotherant = 0; 3830} 3831 3832static void 3833ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) 3834{ 3835 txq->axq_qnum = qnum; 3836 txq->axq_ac = 0; 3837 txq->axq_depth = 0; 3838 txq->axq_aggr_depth = 0; 3839 txq->axq_intrcnt = 0; 3840 txq->axq_link = NULL; 3841 txq->axq_softc = sc; 3842 TAILQ_INIT(&txq->axq_q); 3843 TAILQ_INIT(&txq->axq_tidq); 3844 TAILQ_INIT(&txq->fifo.axq_q); 3845 ATH_TXQ_LOCK_INIT(sc, txq); 3846} 3847 3848/* 3849 * Setup a h/w transmit queue. 3850 */ 3851static struct ath_txq * 3852ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 3853{ 3854 struct ath_hal *ah = sc->sc_ah; 3855 HAL_TXQ_INFO qi; 3856 int qnum; 3857 3858 memset(&qi, 0, sizeof(qi)); 3859 qi.tqi_subtype = subtype; 3860 qi.tqi_aifs = HAL_TXQ_USEDEFAULT; 3861 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; 3862 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; 3863 /* 3864 * Enable interrupts only for EOL and DESC conditions. 3865 * We mark tx descriptors to receive a DESC interrupt 3866 * when a tx queue gets deep; otherwise waiting for the 3867 * EOL to reap descriptors. Note that this is done to 3868 * reduce interrupt load and this only defers reaping 3869 * descriptors, never transmitting frames. Aside from 3870 * reducing interrupts this also permits more concurrency. 3871 * The only potential downside is if the tx queue backs 3872 * up in which case the top half of the kernel may backup 3873 * due to a lack of tx descriptors. 3874 */ 3875 if (sc->sc_isedma) 3876 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 3877 HAL_TXQ_TXOKINT_ENABLE; 3878 else 3879 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | 3880 HAL_TXQ_TXDESCINT_ENABLE; 3881 3882 qnum = ath_hal_setuptxqueue(ah, qtype, &qi); 3883 if (qnum == -1) { 3884 /* 3885 * NB: don't print a message, this happens 3886 * normally on parts with too few tx queues 3887 */ 3888 return NULL; 3889 } 3890 if (qnum >= nitems(sc->sc_txq)) { 3891 device_printf(sc->sc_dev, 3892 "hal qnum %u out of range, max %zu!\n", 3893 qnum, nitems(sc->sc_txq)); 3894 ath_hal_releasetxqueue(ah, qnum); 3895 return NULL; 3896 } 3897 if (!ATH_TXQ_SETUP(sc, qnum)) { 3898 ath_txq_init(sc, &sc->sc_txq[qnum], qnum); 3899 sc->sc_txqsetup |= 1<<qnum; 3900 } 3901 return &sc->sc_txq[qnum]; 3902} 3903 3904/* 3905 * Setup a hardware data transmit queue for the specified 3906 * access control. The hal may not support all requested 3907 * queues in which case it will return a reference to a 3908 * previously setup queue. We record the mapping from ac's 3909 * to h/w queues for use by ath_tx_start and also track 3910 * the set of h/w queues being used to optimize work in the 3911 * transmit interrupt handler and related routines. 3912 */ 3913static int 3914ath_tx_setup(struct ath_softc *sc, int ac, int haltype) 3915{ 3916 struct ath_txq *txq; 3917 3918 if (ac >= nitems(sc->sc_ac2q)) { 3919 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", 3920 ac, nitems(sc->sc_ac2q)); 3921 return 0; 3922 } 3923 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); 3924 if (txq != NULL) { 3925 txq->axq_ac = ac; 3926 sc->sc_ac2q[ac] = txq; 3927 return 1; 3928 } else 3929 return 0; 3930} 3931 3932/* 3933 * Update WME parameters for a transmit queue. 
3934 */ 3935static int 3936ath_txq_update(struct ath_softc *sc, int ac) 3937{ 3938#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) 3939 struct ieee80211com *ic = &sc->sc_ic; 3940 struct ath_txq *txq = sc->sc_ac2q[ac]; 3941 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; 3942 struct ath_hal *ah = sc->sc_ah; 3943 HAL_TXQ_INFO qi; 3944 3945 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); 3946#ifdef IEEE80211_SUPPORT_TDMA 3947 if (sc->sc_tdma) { 3948 /* 3949 * AIFS is zero so there's no pre-transmit wait. The 3950 * burst time defines the slot duration and is configured 3951 * through net80211. The QCU is setup to not do post-xmit 3952 * back off, lockout all lower-priority QCU's, and fire 3953 * off the DMA beacon alert timer which is setup based 3954 * on the slot configuration. 3955 */ 3956 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3957 | HAL_TXQ_TXERRINT_ENABLE 3958 | HAL_TXQ_TXURNINT_ENABLE 3959 | HAL_TXQ_TXEOLINT_ENABLE 3960 | HAL_TXQ_DBA_GATED 3961 | HAL_TXQ_BACKOFF_DISABLE 3962 | HAL_TXQ_ARB_LOCKOUT_GLOBAL 3963 ; 3964 qi.tqi_aifs = 0; 3965 /* XXX +dbaprep? */ 3966 qi.tqi_readyTime = sc->sc_tdmaslotlen; 3967 qi.tqi_burstTime = qi.tqi_readyTime; 3968 } else { 3969#endif 3970 /* 3971 * XXX shouldn't this just use the default flags 3972 * used in the previous queue setup? 3973 */ 3974 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE 3975 | HAL_TXQ_TXERRINT_ENABLE 3976 | HAL_TXQ_TXDESCINT_ENABLE 3977 | HAL_TXQ_TXURNINT_ENABLE 3978 | HAL_TXQ_TXEOLINT_ENABLE 3979 ; 3980 qi.tqi_aifs = wmep->wmep_aifsn; 3981 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); 3982 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); 3983 qi.tqi_readyTime = 0; 3984 qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit); 3985#ifdef IEEE80211_SUPPORT_TDMA 3986 } 3987#endif 3988 3989 DPRINTF(sc, ATH_DEBUG_RESET, 3990 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", 3991 __func__, txq->axq_qnum, qi.tqi_qflags, 3992 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); 3993 3994 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { 3995 device_printf(sc->sc_dev, "unable to update hardware queue " 3996 "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); 3997 return 0; 3998 } else { 3999 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ 4000 return 1; 4001 } 4002#undef ATH_EXPONENT_TO_VALUE 4003} 4004 4005/* 4006 * Callback from the 802.11 layer to update WME parameters. 4007 */ 4008int 4009ath_wme_update(struct ieee80211com *ic) 4010{ 4011 struct ath_softc *sc = ic->ic_softc; 4012 4013 return !ath_txq_update(sc, WME_AC_BE) || 4014 !ath_txq_update(sc, WME_AC_BK) || 4015 !ath_txq_update(sc, WME_AC_VI) || 4016 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; 4017} 4018 4019/* 4020 * Reclaim resources for a setup queue. 4021 */ 4022static void 4023ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 4024{ 4025 4026 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); 4027 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 4028 ATH_TXQ_LOCK_DESTROY(txq); 4029} 4030 4031/* 4032 * Reclaim all tx queue resources. 4033 */ 4034static void 4035ath_tx_cleanup(struct ath_softc *sc) 4036{ 4037 int i; 4038 4039 ATH_TXBUF_LOCK_DESTROY(sc); 4040 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4041 if (ATH_TXQ_SETUP(sc, i)) 4042 ath_tx_cleanupq(sc, &sc->sc_txq[i]); 4043} 4044 4045/* 4046 * Return h/w rate index for an IEEE rate (w/o basic rate bit) 4047 * using the current rates in sc_rixmap. 
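 *
 * For example (illustrative): net80211 encodes 11 Mb/s as 22 in
 * 500 kb/s units, so ath_tx_findrix(sc, 22) returns sc_rixmap[22],
 * falling back to rix 0 if that rate isn't in the current table.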
4048  */
4049 int
4050 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4051 {
4052	int rix = sc->sc_rixmap[rate];
4053	/* NB: return lowest rix for invalid rate */
4054	return (rix == 0xff ? 0 : rix);
4055 }
4056
4057 static void
4058 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4059     struct ath_buf *bf)
4060 {
4061	struct ieee80211_node *ni = bf->bf_node;
4062	struct ieee80211com *ic = &sc->sc_ic;
4063	int sr, lr, pri;
4064
4065	if (ts->ts_status == 0) {
4066		u_int8_t txant = ts->ts_antenna;
4067		sc->sc_stats.ast_ant_tx[txant]++;
4068		sc->sc_ant_tx[txant]++;
4069		if (ts->ts_finaltsi != 0)
4070			sc->sc_stats.ast_tx_altrate++;
4071		pri = M_WME_GETAC(bf->bf_m);
4072		if (pri >= WME_AC_VO)
4073			ic->ic_wme.wme_hipri_traffic++;
4074		if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4075			ni->ni_inact = ni->ni_inact_reload;
4076	} else {
4077		if (ts->ts_status & HAL_TXERR_XRETRY)
4078			sc->sc_stats.ast_tx_xretries++;
4079		if (ts->ts_status & HAL_TXERR_FIFO)
4080			sc->sc_stats.ast_tx_fifoerr++;
4081		if (ts->ts_status & HAL_TXERR_FILT)
4082			sc->sc_stats.ast_tx_filtered++;
4083		if (ts->ts_status & HAL_TXERR_XTXOP)
4084			sc->sc_stats.ast_tx_xtxop++;
4085		if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4086			sc->sc_stats.ast_tx_timerexpired++;
4087
4088		if (bf->bf_m->m_flags & M_FF)
4089			sc->sc_stats.ast_ff_txerr++;
4090	}
4091	/* XXX when is this valid? */
4092	if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4093		sc->sc_stats.ast_tx_desccfgerr++;
4094	/*
4095	 * This can be valid for successful frame transmission!
4096	 * If there's a TX FIFO underrun during aggregate transmission,
4097	 * the MAC will pad the rest of the aggregate with delimiters.
4098	 * If a BA is returned, the frame is marked as "OK" and it's up
4099	 * to the TX completion code to notice which frames weren't
4100	 * successfully transmitted.
4101	 */
4102	if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4103		sc->sc_stats.ast_tx_data_underrun++;
4104	if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4105		sc->sc_stats.ast_tx_delim_underrun++;
4106
4107	sr = ts->ts_shortretry;
4108	lr = ts->ts_longretry;
4109	sc->sc_stats.ast_tx_shortretry += sr;
4110	sc->sc_stats.ast_tx_longretry += lr;
4111
4112 }
4113
4114 /*
4115  * The default completion.  If fail is 1, this means
4116  * "please don't retry the frame, and just return -1 status
4117  * to the net80211 stack."
4118  */
4119 void
4120 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4121 {
4122	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4123	int st;
4124
4125	if (fail == 1)
4126		st = -1;
4127	else
4128		st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4129		    ts->ts_status : HAL_TXERR_XRETRY;
4130
4131 #if 0
4132	if (bf->bf_state.bfs_dobaw)
4133		device_printf(sc->sc_dev,
4134		    "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4135		    __func__,
4136		    bf,
4137		    SEQNO(bf->bf_state.bfs_seqno));
4138 #endif
4139	if (bf->bf_next != NULL)
4140		device_printf(sc->sc_dev,
4141		    "%s: bf %p: seqno %d: bf_next not NULL!\n",
4142		    __func__,
4143		    bf,
4144		    SEQNO(bf->bf_state.bfs_seqno));
4145
4146	/*
4147	 * Check if the node software queue is empty; if so
4148	 * then clear the TIM.
4149	 *
4150	 * This needs to be done before the buffer is freed as
4151	 * otherwise the node reference will have been released
4152	 * and the node may not actually exist any longer.
4153	 *
4154	 * XXX I don't like this belonging here, but it's cleaner
4155	 * to do it here right now than all the other places
4156	 * where ath_tx_default_comp() is called.
4157 * 4158 * XXX TODO: during drain, ensure that the callback is 4159 * being called so we get a chance to update the TIM. 4160 */ 4161 if (bf->bf_node) { 4162 ATH_TX_LOCK(sc); 4163 ath_tx_update_tim(sc, bf->bf_node, 0); 4164 ATH_TX_UNLOCK(sc); 4165 } 4166 4167 /* 4168 * Do any tx complete callback. Note this must 4169 * be done before releasing the node reference. 4170 * This will free the mbuf, release the net80211 4171 * node and recycle the ath_buf. 4172 */ 4173 ath_tx_freebuf(sc, bf, st); 4174} 4175 4176/* 4177 * Update rate control with the given completion status. 4178 */ 4179void 4180ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, 4181 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, 4182 int nframes, int nbad) 4183{ 4184 struct ath_node *an; 4185 4186 /* Only for unicast frames */ 4187 if (ni == NULL) 4188 return; 4189 4190 an = ATH_NODE(ni); 4191 ATH_NODE_UNLOCK_ASSERT(an); 4192 4193 if ((ts->ts_status & HAL_TXERR_FILT) == 0) { 4194 ATH_NODE_LOCK(an); 4195 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); 4196 ATH_NODE_UNLOCK(an); 4197 } 4198} 4199 4200/* 4201 * Process the completion of the given buffer. 4202 * 4203 * This calls the rate control update and then the buffer completion. 4204 * This will either free the buffer or requeue it. In any case, the 4205 * bf pointer should be treated as invalid after this function is called. 4206 */ 4207void 4208ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, 4209 struct ath_tx_status *ts, struct ath_buf *bf) 4210{ 4211 struct ieee80211_node *ni = bf->bf_node; 4212 4213 ATH_TX_UNLOCK_ASSERT(sc); 4214 ATH_TXQ_UNLOCK_ASSERT(txq); 4215 4216 /* If unicast frame, update general statistics */ 4217 if (ni != NULL) { 4218 /* update statistics */ 4219 ath_tx_update_stats(sc, ts, bf); 4220 } 4221 4222 /* 4223 * Call the completion handler. 4224 * The completion handler is responsible for 4225 * calling the rate control code. 4226 * 4227 * Frames with no completion handler get the 4228 * rate control code called here. 4229 */ 4230 if (bf->bf_comp == NULL) { 4231 if ((ts->ts_status & HAL_TXERR_FILT) == 0 && 4232 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { 4233 /* 4234 * XXX assume this isn't an aggregate 4235 * frame. 4236 */ 4237 ath_tx_update_ratectrl(sc, ni, 4238 bf->bf_state.bfs_rc, ts, 4239 bf->bf_state.bfs_pktlen, 1, 4240 (ts->ts_status == 0 ? 0 : 1)); 4241 } 4242 ath_tx_default_comp(sc, bf, 0); 4243 } else 4244 bf->bf_comp(sc, bf, 0); 4245} 4246 4247 4248 4249/* 4250 * Process completed xmit descriptors from the specified queue. 4251 * Kick the packet scheduler if needed. This can occur from this 4252 * particular task. 
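 *
 * Returns the number of frames ack'd; callers use a non-zero return
 * to refresh sc_lastrx, e.g. (a sketch matching ath_tx_proc_q0()
 * further below):
 *
 *	if (ath_tx_processq(sc, &sc->sc_txq[0], 1))
 *		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);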
4253  */
4254 static int
4255 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4256 {
4257	struct ath_hal *ah = sc->sc_ah;
4258	struct ath_buf *bf;
4259	struct ath_desc *ds;
4260	struct ath_tx_status *ts;
4261	struct ieee80211_node *ni;
4262 #ifdef	IEEE80211_SUPPORT_SUPERG
4263	struct ieee80211com *ic = &sc->sc_ic;
4264 #endif	/* IEEE80211_SUPPORT_SUPERG */
4265	int nacked;
4266	HAL_STATUS status;
4267
4268	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4269		__func__, txq->axq_qnum,
4270		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4271		txq->axq_link);
4272
4273	ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
4274	    "ath_tx_processq: txq=%u head %p link %p depth %p",
4275	    txq->axq_qnum,
4276	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4277	    txq->axq_link,
4278	    txq->axq_depth);
4279
4280	nacked = 0;
4281	for (;;) {
4282		ATH_TXQ_LOCK(txq);
4283		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
4284		bf = TAILQ_FIRST(&txq->axq_q);
4285		if (bf == NULL) {
4286			ATH_TXQ_UNLOCK(txq);
4287			break;
4288		}
4289		ds = bf->bf_lastds;	/* XXX must be setup correctly! */
4290		ts = &bf->bf_status.ds_txstat;
4291
4292		status = ath_hal_txprocdesc(ah, ds, ts);
4293 #ifdef ATH_DEBUG
4294		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4295			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4296			    status == HAL_OK);
4297		else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
4298			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4299			    status == HAL_OK);
4300 #endif
4301 #ifdef	ATH_DEBUG_ALQ
4302		if (if_ath_alq_checkdebug(&sc->sc_alq,
4303		    ATH_ALQ_EDMA_TXSTATUS)) {
4304			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
4305			    sc->sc_tx_statuslen,
4306			    (char *) ds);
4307		}
4308 #endif
4309
4310		if (status == HAL_EINPROGRESS) {
4311			ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
4312			    "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
4313			    txq->axq_qnum, bf, ds);
4314			ATH_TXQ_UNLOCK(txq);
4315			break;
4316		}
4317		ATH_TXQ_REMOVE(txq, bf, bf_list);
4318
4319		/*
4320		 * Sanity check.
4321		 */
4322		if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
4323			device_printf(sc->sc_dev,
4324			    "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
4325			    __func__,
4326			    txq->axq_qnum,
4327			    bf,
4328			    bf->bf_state.bfs_tx_queue);
4329		}
4330		if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
4331			device_printf(sc->sc_dev,
4332			    "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
4333			    __func__,
4334			    txq->axq_qnum,
4335			    bf->bf_last,
4336			    bf->bf_last->bf_state.bfs_tx_queue);
4337		}
4338
4339 #if 0
4340		if (txq->axq_depth > 0) {
4341			/*
4342			 * More frames follow.  Mark the buffer busy
4343			 * so it's not re-used while the hardware may
4344			 * still re-read the link field in the descriptor.
4345			 *
4346			 * Use the last buffer in an aggregate as that
4347			 * is where the hardware may be - intermediate
4348			 * descriptors won't be "busy".
4349			 */
4350			bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4351		} else
4352			txq->axq_link = NULL;
4353 #else
4354		bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4355 #endif
4356		if (bf->bf_state.bfs_aggr)
4357			txq->axq_aggr_depth--;
4358
4359		ni = bf->bf_node;
4360
4361		ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
4362		    "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
4363		    txq->axq_qnum, bf, ds, ni, ts->ts_status);
4364		/*
4365		 * If unicast frame was ack'd update RSSI,
4366		 * including the last rx time used to
4367		 * work around phantom bmiss interrupts.
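		 *
		 * (ATH_RSSI_LPF() below is a low-pass filter over the
		 * reported RSSI; conceptually avg += (sample - avg) / N,
		 * so one noisy sample can't swing the running average -
		 * a sketch of the idea, not the exact macro.)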
4368 */ 4369 if (ni != NULL && ts->ts_status == 0 && 4370 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { 4371 nacked++; 4372 sc->sc_stats.ast_tx_rssi = ts->ts_rssi; 4373 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, 4374 ts->ts_rssi); 4375 } 4376 ATH_TXQ_UNLOCK(txq); 4377 4378 /* 4379 * Update statistics and call completion 4380 */ 4381 ath_tx_process_buf_completion(sc, txq, ts, bf); 4382 4383 /* XXX at this point, bf and ni may be totally invalid */ 4384 } 4385#ifdef IEEE80211_SUPPORT_SUPERG 4386 /* 4387 * Flush fast-frame staging queue when traffic slows. 4388 */ 4389 if (txq->axq_depth <= 1) 4390 ieee80211_ff_flush(ic, txq->axq_ac); 4391#endif 4392 4393 /* Kick the software TXQ scheduler */ 4394 if (dosched) { 4395 ATH_TX_LOCK(sc); 4396 ath_txq_sched(sc, txq); 4397 ATH_TX_UNLOCK(sc); 4398 } 4399 4400 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4401 "ath_tx_processq: txq=%u: done", 4402 txq->axq_qnum); 4403 4404 return nacked; 4405} 4406 4407#define TXQACTIVE(t, q) ( (t) & (1 << (q))) 4408 4409/* 4410 * Deferred processing of transmit interrupt; special-cased 4411 * for a single hardware transmit queue (e.g. 5210 and 5211). 4412 */ 4413static void 4414ath_tx_proc_q0(void *arg, int npending) 4415{ 4416 struct ath_softc *sc = arg; 4417 uint32_t txqs; 4418 4419 ATH_PCU_LOCK(sc); 4420 sc->sc_txproc_cnt++; 4421 txqs = sc->sc_txq_active; 4422 sc->sc_txq_active &= ~txqs; 4423 ATH_PCU_UNLOCK(sc); 4424 4425 ATH_LOCK(sc); 4426 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4427 ATH_UNLOCK(sc); 4428 4429 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4430 "ath_tx_proc_q0: txqs=0x%08x", txqs); 4431 4432 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) 4433 /* XXX why is lastrx updated in tx code? */ 4434 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4435 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4436 ath_tx_processq(sc, sc->sc_cabq, 1); 4437 sc->sc_wd_timer = 0; 4438 4439 if (sc->sc_softled) 4440 ath_led_event(sc, sc->sc_txrix); 4441 4442 ATH_PCU_LOCK(sc); 4443 sc->sc_txproc_cnt--; 4444 ATH_PCU_UNLOCK(sc); 4445 4446 ATH_LOCK(sc); 4447 ath_power_restore_power_state(sc); 4448 ATH_UNLOCK(sc); 4449 4450 ath_tx_kick(sc); 4451} 4452 4453/* 4454 * Deferred processing of transmit interrupt; special-cased 4455 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). 4456 */ 4457static void 4458ath_tx_proc_q0123(void *arg, int npending) 4459{ 4460 struct ath_softc *sc = arg; 4461 int nacked; 4462 uint32_t txqs; 4463 4464 ATH_PCU_LOCK(sc); 4465 sc->sc_txproc_cnt++; 4466 txqs = sc->sc_txq_active; 4467 sc->sc_txq_active &= ~txqs; 4468 ATH_PCU_UNLOCK(sc); 4469 4470 ATH_LOCK(sc); 4471 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4472 ATH_UNLOCK(sc); 4473 4474 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, 4475 "ath_tx_proc_q0123: txqs=0x%08x", txqs); 4476 4477 /* 4478 * Process each active queue. 
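	 *
	 * TXQACTIVE() above is a plain bitmask test; e.g. txqs = 0x5
	 * marks queues 0 and 2 as needing service, since
	 * (0x5 & (1 << 2)) != 0 (worked example for illustration).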
4479 */ 4480 nacked = 0; 4481 if (TXQACTIVE(txqs, 0)) 4482 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); 4483 if (TXQACTIVE(txqs, 1)) 4484 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); 4485 if (TXQACTIVE(txqs, 2)) 4486 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); 4487 if (TXQACTIVE(txqs, 3)) 4488 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); 4489 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) 4490 ath_tx_processq(sc, sc->sc_cabq, 1); 4491 if (nacked) 4492 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4493 4494 sc->sc_wd_timer = 0; 4495 4496 if (sc->sc_softled) 4497 ath_led_event(sc, sc->sc_txrix); 4498 4499 ATH_PCU_LOCK(sc); 4500 sc->sc_txproc_cnt--; 4501 ATH_PCU_UNLOCK(sc); 4502 4503 ATH_LOCK(sc); 4504 ath_power_restore_power_state(sc); 4505 ATH_UNLOCK(sc); 4506 4507 ath_tx_kick(sc); 4508} 4509 4510/* 4511 * Deferred processing of transmit interrupt. 4512 */ 4513static void 4514ath_tx_proc(void *arg, int npending) 4515{ 4516 struct ath_softc *sc = arg; 4517 int i, nacked; 4518 uint32_t txqs; 4519 4520 ATH_PCU_LOCK(sc); 4521 sc->sc_txproc_cnt++; 4522 txqs = sc->sc_txq_active; 4523 sc->sc_txq_active &= ~txqs; 4524 ATH_PCU_UNLOCK(sc); 4525 4526 ATH_LOCK(sc); 4527 ath_power_set_power_state(sc, HAL_PM_AWAKE); 4528 ATH_UNLOCK(sc); 4529 4530 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); 4531 4532 /* 4533 * Process each active queue. 4534 */ 4535 nacked = 0; 4536 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) 4537 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) 4538 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); 4539 if (nacked) 4540 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); 4541 4542 sc->sc_wd_timer = 0; 4543 4544 if (sc->sc_softled) 4545 ath_led_event(sc, sc->sc_txrix); 4546 4547 ATH_PCU_LOCK(sc); 4548 sc->sc_txproc_cnt--; 4549 ATH_PCU_UNLOCK(sc); 4550 4551 ATH_LOCK(sc); 4552 ath_power_restore_power_state(sc); 4553 ATH_UNLOCK(sc); 4554 4555 ath_tx_kick(sc); 4556} 4557#undef TXQACTIVE 4558 4559/* 4560 * Deferred processing of TXQ rescheduling. 4561 */ 4562static void 4563ath_txq_sched_tasklet(void *arg, int npending) 4564{ 4565 struct ath_softc *sc = arg; 4566 int i; 4567 4568 /* XXX is skipping ok? 
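	 * (The disabled block just below would bail out while a reset is
	 * in progress; as committed we simply bump sc_txproc_cnt and
	 * continue.)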
 */
4569	ATH_PCU_LOCK(sc);
4570 #if 0
4571	if (sc->sc_inreset_cnt > 0) {
4572		device_printf(sc->sc_dev,
4573		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4574		ATH_PCU_UNLOCK(sc);
4575		return;
4576	}
4577 #endif
4578	sc->sc_txproc_cnt++;
4579	ATH_PCU_UNLOCK(sc);
4580
4581	ATH_LOCK(sc);
4582	ath_power_set_power_state(sc, HAL_PM_AWAKE);
4583	ATH_UNLOCK(sc);
4584
4585	ATH_TX_LOCK(sc);
4586	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4587		if (ATH_TXQ_SETUP(sc, i)) {
4588			ath_txq_sched(sc, &sc->sc_txq[i]);
4589		}
4590	}
4591	ATH_TX_UNLOCK(sc);
4592
4593	ATH_LOCK(sc);
4594	ath_power_restore_power_state(sc);
4595	ATH_UNLOCK(sc);
4596
4597	ATH_PCU_LOCK(sc);
4598	sc->sc_txproc_cnt--;
4599	ATH_PCU_UNLOCK(sc);
4600 }
4601
4602 void
4603 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
4604 {
4605
4606	ATH_TXBUF_LOCK_ASSERT(sc);
4607
4608	if (bf->bf_flags & ATH_BUF_MGMT)
4609		TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
4610	else {
4611		TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4612		sc->sc_txbuf_cnt++;
4613		if (sc->sc_txbuf_cnt > ath_txbuf) {
4614			device_printf(sc->sc_dev,
4615			    "%s: sc_txbuf_cnt > %d?\n",
4616			    __func__,
4617			    ath_txbuf);
4618			sc->sc_txbuf_cnt = ath_txbuf;
4619		}
4620	}
4621 }
4622
4623 void
4624 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4625 {
4626
4627	ATH_TXBUF_LOCK_ASSERT(sc);
4628
4629	if (bf->bf_flags & ATH_BUF_MGMT)
4630		TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4631	else {
4632		TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4633		sc->sc_txbuf_cnt++;
4634		if (sc->sc_txbuf_cnt > ATH_TXBUF) {
4635			device_printf(sc->sc_dev,
4636			    "%s: sc_txbuf_cnt > %d?\n",
4637			    __func__,
4638			    ATH_TXBUF);
4639			sc->sc_txbuf_cnt = ATH_TXBUF;
4640		}
4641	}
4642 }
4643
4644 /*
4645  * Free the holding buffer if it exists
4646  */
4647 void
4648 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
4649 {
4650	ATH_TXBUF_UNLOCK_ASSERT(sc);
4651	ATH_TXQ_LOCK_ASSERT(txq);
4652
4653	if (txq->axq_holdingbf == NULL)
4654		return;
4655
4656	txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
4657
4658	ATH_TXBUF_LOCK(sc);
4659	ath_returnbuf_tail(sc, txq->axq_holdingbf);
4660	ATH_TXBUF_UNLOCK(sc);
4661
4662	txq->axq_holdingbf = NULL;
4663 }
4664
4665 /*
4666  * Add this buffer to the holding queue, freeing the previous
4667  * one if it exists.
4668  */
4669 static void
4670 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
4671 {
4672	struct ath_txq *txq;
4673
4674	txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4675
4676	ATH_TXBUF_UNLOCK_ASSERT(sc);
4677	ATH_TXQ_LOCK_ASSERT(txq);
4678
4679	/* XXX assert ATH_BUF_BUSY is set */
4680
4681	/* XXX assert the tx queue is under the max (0..HAL_NUM_TX_QUEUES-1 are valid) */
4682	if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
4683		device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
4684		    __func__,
4685		    bf,
4686		    bf->bf_state.bfs_tx_queue);
4687		bf->bf_flags &= ~ATH_BUF_BUSY;
4688		ath_returnbuf_tail(sc, bf);
4689		return;
4690	}
4691	ath_txq_freeholdingbuf(sc, txq);
4692	txq->axq_holdingbf = bf;
4693 }
4694
4695 /*
4696  * Return a buffer to the pool and update the 'busy' flag on the
4697  * previous 'tail' entry.
4698  *
4699  * This _must_ only be called when the buffer is involved in a completed
4700  * TX.  The logic is that if it was part of an active TX, the previous
4701  * buffer on the list is now not involved in a halted TX DMA queue, waiting
4702  * for restart (eg for TDMA.)
4703  *
4704  * The caller must free the mbuf and recycle the node reference.
4705  *
4706  * XXX This method of handling busy / holding buffers is insanely stupid.
4707 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would 4708 * be much nicer if buffers in the processq() methods would instead be 4709 * always completed there (pushed onto a txq or ath_bufhead) so we knew 4710 * exactly what hardware queue they came from in the first place. 4711 */ 4712void 4713ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) 4714{ 4715 struct ath_txq *txq; 4716 4717 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; 4718 4719 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); 4720 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); 4721 4722 /* 4723 * If this buffer is busy, push it onto the holding queue. 4724 */ 4725 if (bf->bf_flags & ATH_BUF_BUSY) { 4726 ATH_TXQ_LOCK(txq); 4727 ath_txq_addholdingbuf(sc, bf); 4728 ATH_TXQ_UNLOCK(txq); 4729 return; 4730 } 4731 4732 /* 4733 * Not a busy buffer, so free normally 4734 */ 4735 ATH_TXBUF_LOCK(sc); 4736 ath_returnbuf_tail(sc, bf); 4737 ATH_TXBUF_UNLOCK(sc); 4738} 4739 4740/* 4741 * This is currently used by ath_tx_draintxq() and 4742 * ath_tx_tid_free_pkts(). 4743 * 4744 * It recycles a single ath_buf. 4745 */ 4746void 4747ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) 4748{ 4749 struct ieee80211_node *ni = bf->bf_node; 4750 struct mbuf *m0 = bf->bf_m; 4751 4752 /* 4753 * Make sure that we only sync/unload if there's an mbuf. 4754 * If not (eg we cloned a buffer), the unload will have already 4755 * occurred. 4756 */ 4757 if (bf->bf_m != NULL) { 4758 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, 4759 BUS_DMASYNC_POSTWRITE); 4760 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); 4761 } 4762 4763 bf->bf_node = NULL; 4764 bf->bf_m = NULL; 4765 4766 /* Free the buffer, it's not needed any longer */ 4767 ath_freebuf(sc, bf); 4768 4769 /* Pass the buffer back to net80211 - completing it */ 4770 ieee80211_tx_complete(ni, m0, status); 4771} 4772 4773static struct ath_buf * 4774ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) 4775{ 4776 struct ath_buf *bf; 4777 4778 ATH_TXQ_LOCK_ASSERT(txq); 4779 4780 /* 4781 * Drain the FIFO queue first, then if it's 4782 * empty, move to the normal frame queue. 4783 */ 4784 bf = TAILQ_FIRST(&txq->fifo.axq_q); 4785 if (bf != NULL) { 4786 /* 4787 * Is it the last buffer in this set? 4788 * Decrement the FIFO counter. 4789 */ 4790 if (bf->bf_flags & ATH_BUF_FIFOEND) { 4791 if (txq->axq_fifo_depth == 0) { 4792 device_printf(sc->sc_dev, 4793 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", 4794 __func__, 4795 txq->axq_qnum, 4796 txq->fifo.axq_depth); 4797 } else 4798 txq->axq_fifo_depth--; 4799 } 4800 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); 4801 return (bf); 4802 } 4803 4804 /* 4805 * Debugging! 4806 */ 4807 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { 4808 device_printf(sc->sc_dev, 4809 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", 4810 __func__, 4811 txq->axq_qnum, 4812 txq->axq_fifo_depth, 4813 txq->fifo.axq_depth); 4814 } 4815 4816 /* 4817 * Now drain the pending queue. 
4818 */ 4819 bf = TAILQ_FIRST(&txq->axq_q); 4820 if (bf == NULL) { 4821 txq->axq_link = NULL; 4822 return (NULL); 4823 } 4824 ATH_TXQ_REMOVE(txq, bf, bf_list); 4825 return (bf); 4826} 4827 4828void 4829ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) 4830{ 4831#ifdef ATH_DEBUG 4832 struct ath_hal *ah = sc->sc_ah; 4833#endif 4834 struct ath_buf *bf; 4835 u_int ix; 4836 4837 /* 4838 * NB: this assumes output has been stopped and 4839 * we do not need to block ath_tx_proc 4840 */ 4841 for (ix = 0;; ix++) { 4842 ATH_TXQ_LOCK(txq); 4843 bf = ath_tx_draintxq_get_one(sc, txq); 4844 if (bf == NULL) { 4845 ATH_TXQ_UNLOCK(txq); 4846 break; 4847 } 4848 if (bf->bf_state.bfs_aggr) 4849 txq->axq_aggr_depth--; 4850#ifdef ATH_DEBUG 4851 if (sc->sc_debug & ATH_DEBUG_RESET) { 4852 struct ieee80211com *ic = &sc->sc_ic; 4853 int status = 0; 4854 4855 /* 4856 * EDMA operation has a TX completion FIFO 4857 * separate from the TX descriptor, so this 4858 * method of checking the "completion" status 4859 * is wrong. 4860 */ 4861 if (! sc->sc_isedma) { 4862 status = (ath_hal_txprocdesc(ah, 4863 bf->bf_lastds, 4864 &bf->bf_status.ds_txstat) == HAL_OK); 4865 } 4866 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); 4867 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), 4868 bf->bf_m->m_len, 0, -1); 4869 } 4870#endif /* ATH_DEBUG */ 4871 /* 4872 * Since we're now doing magic in the completion 4873 * functions, we -must- call it for aggregation 4874 * destinations or BAW tracking will get upset. 4875 */ 4876 /* 4877 * Clear ATH_BUF_BUSY; the completion handler 4878 * will free the buffer. 4879 */ 4880 ATH_TXQ_UNLOCK(txq); 4881 bf->bf_flags &= ~ATH_BUF_BUSY; 4882 if (bf->bf_comp) 4883 bf->bf_comp(sc, bf, 1); 4884 else 4885 ath_tx_default_comp(sc, bf, 1); 4886 } 4887 4888 /* 4889 * Free the holding buffer if it exists 4890 */ 4891 ATH_TXQ_LOCK(txq); 4892 ath_txq_freeholdingbuf(sc, txq); 4893 ATH_TXQ_UNLOCK(txq); 4894 4895 /* 4896 * Drain software queued frames which are on 4897 * active TIDs. 4898 */ 4899 ath_tx_txq_drain(sc, txq); 4900} 4901 4902static void 4903ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 4904{ 4905 struct ath_hal *ah = sc->sc_ah; 4906 4907 ATH_TXQ_LOCK_ASSERT(txq); 4908 4909 DPRINTF(sc, ATH_DEBUG_RESET, 4910 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " 4911 "link %p, holdingbf=%p\n", 4912 __func__, 4913 txq->axq_qnum, 4914 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), 4915 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), 4916 (int) ath_hal_numtxpending(ah, txq->axq_qnum), 4917 txq->axq_flags, 4918 txq->axq_link, 4919 txq->axq_holdingbf); 4920 4921 (void) ath_hal_stoptxdma(ah, txq->axq_qnum); 4922 /* We've stopped TX DMA, so mark this as stopped. 
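	 * Clearing ATH_TXQ_PUTRUNNING records that the hardware no longer
	 * has a descriptor chain pushed to it, so the next TX start must
	 * re-program the queue head rather than chase axq_link (an
	 * interpretive note; the flag semantics live in if_athvar.h).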
*/ 4923 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; 4924 4925#ifdef ATH_DEBUG 4926 if ((sc->sc_debug & ATH_DEBUG_RESET) 4927 && (txq->axq_holdingbf != NULL)) { 4928 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); 4929 } 4930#endif 4931} 4932 4933int 4934ath_stoptxdma(struct ath_softc *sc) 4935{ 4936 struct ath_hal *ah = sc->sc_ah; 4937 int i; 4938 4939 /* XXX return value */ 4940 if (sc->sc_invalid) 4941 return 0; 4942 4943 if (!sc->sc_invalid) { 4944 /* don't touch the hardware if marked invalid */ 4945 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", 4946 __func__, sc->sc_bhalq, 4947 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), 4948 NULL); 4949 4950 /* stop the beacon queue */ 4951 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); 4952 4953 /* Stop the data queues */ 4954 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 4955 if (ATH_TXQ_SETUP(sc, i)) { 4956 ATH_TXQ_LOCK(&sc->sc_txq[i]); 4957 ath_tx_stopdma(sc, &sc->sc_txq[i]); 4958 ATH_TXQ_UNLOCK(&sc->sc_txq[i]); 4959 } 4960 } 4961 } 4962 4963 return 1; 4964} 4965 4966#ifdef ATH_DEBUG 4967void 4968ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) 4969{ 4970 struct ath_hal *ah = sc->sc_ah; 4971 struct ath_buf *bf; 4972 int i = 0; 4973 4974 if (! (sc->sc_debug & ATH_DEBUG_RESET)) 4975 return; 4976 4977 device_printf(sc->sc_dev, "%s: Q%d: begin\n", 4978 __func__, txq->axq_qnum); 4979 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { 4980 ath_printtxbuf(sc, bf, txq->axq_qnum, i, 4981 ath_hal_txprocdesc(ah, bf->bf_lastds, 4982 &bf->bf_status.ds_txstat) == HAL_OK); 4983 i++; 4984 } 4985 device_printf(sc->sc_dev, "%s: Q%d: end\n", 4986 __func__, txq->axq_qnum); 4987} 4988#endif /* ATH_DEBUG */ 4989 4990/* 4991 * Drain the transmit queues and reclaim resources. 4992 */ 4993void 4994ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) 4995{ 4996 struct ath_hal *ah = sc->sc_ah; 4997 struct ath_buf *bf_last; 4998 int i; 4999 5000 (void) ath_stoptxdma(sc); 5001 5002 /* 5003 * Dump the queue contents 5004 */ 5005 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { 5006 /* 5007 * XXX TODO: should we just handle the completed TX frames 5008 * here, whether or not the reset is a full one or not? 5009 */ 5010 if (ATH_TXQ_SETUP(sc, i)) { 5011#ifdef ATH_DEBUG 5012 if (sc->sc_debug & ATH_DEBUG_RESET) 5013 ath_tx_dump(sc, &sc->sc_txq[i]); 5014#endif /* ATH_DEBUG */ 5015 if (reset_type == ATH_RESET_NOLOSS) { 5016 ath_tx_processq(sc, &sc->sc_txq[i], 0); 5017 ATH_TXQ_LOCK(&sc->sc_txq[i]); 5018 /* 5019 * Free the holding buffer; DMA is now 5020 * stopped. 5021 */ 5022 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); 5023 /* 5024 * Setup the link pointer to be the 5025 * _last_ buffer/descriptor in the list. 5026 * If there's nothing in the list, set it 5027 * to NULL. 
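				 *
				 * (Intent sketch: with axq_link aimed at
				 * the tail descriptor, a later restart can
				 * chain new frames onto what's still queued
				 * instead of orphaning it.)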
5028				 */
5029				bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
5030				    axq_q_s);
5031				if (bf_last != NULL) {
5032					ath_hal_gettxdesclinkptr(ah,
5033					    bf_last->bf_lastds,
5034					    &sc->sc_txq[i].axq_link);
5035				} else {
5036					sc->sc_txq[i].axq_link = NULL;
5037				}
5038				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5039			} else
5040				ath_tx_draintxq(sc, &sc->sc_txq[i]);
5041		}
5042	}
5043 #ifdef ATH_DEBUG
5044	if (sc->sc_debug & ATH_DEBUG_RESET) {
5045		struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5046		if (bf != NULL && bf->bf_m != NULL) {
5047			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5048				ath_hal_txprocdesc(ah, bf->bf_lastds,
5049				    &bf->bf_status.ds_txstat) == HAL_OK);
5050			ieee80211_dump_pkt(&sc->sc_ic,
5051			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5052			    0, -1);
5053		}
5054	}
5055 #endif /* ATH_DEBUG */
5056	sc->sc_wd_timer = 0;
5057 }
5058
5059 /*
5060  * Update internal state after a channel change.
5061  */
5062 static void
5063 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5064 {
5065	enum ieee80211_phymode mode;
5066
5067	/*
5068	 * Change channels and update the h/w rate map
5069	 * if we're switching; e.g. 11a to 11b/g.
5070	 */
5071	mode = ieee80211_chan2mode(chan);
5072	if (mode != sc->sc_curmode)
5073		ath_setcurmode(sc, mode);
5074	sc->sc_curchan = chan;
5075 }
5076
5077 /*
5078  * Set/change channels.  If the channel is really being changed,
5079  * it's done by resetting the chip.  To accomplish this we must
5080  * first clean up any pending DMA, then restart things, à la
5081  * ath_init.
5082  */
5083 static int
5084 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5085 {
5086	struct ieee80211com *ic = &sc->sc_ic;
5087	struct ath_hal *ah = sc->sc_ah;
5088	int ret = 0;
5089
5090	/* Treat this as an interface reset */
5091	ATH_PCU_UNLOCK_ASSERT(sc);
5092	ATH_UNLOCK_ASSERT(sc);
5093
5094	/* (Try to) stop TX/RX from occurring */
5095	taskqueue_block(sc->sc_tq);
5096
5097	ATH_PCU_LOCK(sc);
5098
5099	/* Disable interrupts */
5100	ath_hal_intrset(ah, 0);
5101
5102	/* Stop new RX/TX/interrupt completion */
5103	if (ath_reset_grablock(sc, 1) == 0) {
5104		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5105		    __func__);
5106	}
5107
5108	/* Stop pending RX/TX completion */
5109	ath_txrx_stop_locked(sc);
5110
5111	ATH_PCU_UNLOCK(sc);
5112
5113	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5114	    __func__, ieee80211_chan2ieee(ic, chan),
5115	    chan->ic_freq, chan->ic_flags);
5116	if (chan != sc->sc_curchan) {
5117		HAL_STATUS status;
5118		/*
5119		 * To switch channels clear any pending DMA operations;
5120		 * wait long enough for the RX fifo to drain, reset the
5121		 * hardware at the new frequency, and then re-enable
5122		 * the relevant bits of the h/w.
5123		 */
5124 #if 0
5125		ath_hal_intrset(ah, 0);		/* disable interrupts */
5126 #endif
5127		ath_stoprecv(sc, 1);		/* turn off frame recv */
5128		/*
5129		 * First, handle completed TX/RX frames.
5130		 */
5131		ath_rx_flush(sc);
5132		ath_draintxq(sc, ATH_RESET_NOLOSS);
5133		/*
5134		 * Next, flush the non-scheduled frames.
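		 *
		 * (Two-phase drain: the ATH_RESET_NOLOSS pass above reaped
		 * frames the hardware had already completed; this
		 * ATH_RESET_FULL pass throws away whatever is still
		 * pending.)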
5135		 */
5136		ath_draintxq(sc, ATH_RESET_FULL);	/* clear pending tx frames */
5137
5138		ath_update_chainmasks(sc, chan);
5139		ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5140		    sc->sc_cur_rxchainmask);
5141		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE,
5142		    HAL_RESET_NORMAL, &status)) {
5143			device_printf(sc->sc_dev, "%s: unable to reset "
5144			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5145			    __func__, ieee80211_chan2ieee(ic, chan),
5146			    chan->ic_freq, chan->ic_flags, status);
5147			ret = EIO;
5148			goto finish;
5149		}
5150		sc->sc_diversity = ath_hal_getdiversity(ah);
5151
5152		ATH_RX_LOCK(sc);
5153		sc->sc_rx_stopped = 1;
5154		sc->sc_rx_resetted = 1;
5155		ATH_RX_UNLOCK(sc);
5156
5157		/* Let DFS at it in case it's a DFS channel */
5158		ath_dfs_radar_enable(sc, chan);
5159
5160		/* Let spectral at it in case spectral is enabled */
5161		ath_spectral_enable(sc, chan);
5162
5163		/*
5164		 * Let bluetooth coexistence at it in case it's needed for
5165		 * this channel
5166		 */
5167		ath_btcoex_enable(sc, ic->ic_curchan);
5168
5169		/*
5170		 * If we're doing TDMA, enforce the TXOP limitation for chips
5171		 * that support it.
5172		 */
5173		if (sc->sc_hasenforcetxop && sc->sc_tdma)
5174			ath_hal_setenforcetxop(sc->sc_ah, 1);
5175		else
5176			ath_hal_setenforcetxop(sc->sc_ah, 0);
5177
5178		/*
5179		 * Re-enable rx framework.
5180		 */
5181		if (ath_startrecv(sc) != 0) {
5182			device_printf(sc->sc_dev,
5183			    "%s: unable to restart recv logic\n", __func__);
5184			ret = EIO;
5185			goto finish;
5186		}
5187
5188		/*
5189		 * Change channels and update the h/w rate map
5190		 * if we're switching; e.g. 11a to 11b/g.
5191		 */
5192		ath_chan_change(sc, chan);
5193
5194		/*
5195		 * Reset clears the beacon timers; reset them
5196		 * here if needed.
5197		 */
5198		if (sc->sc_beacons) {		/* restart beacons */
5199 #ifdef IEEE80211_SUPPORT_TDMA
5200			if (sc->sc_tdma)
5201				ath_tdma_config(sc, NULL);
5202			else
5203 #endif
5204				ath_beacon_config(sc, NULL);
5205		}
5206
5207		/*
5208		 * Re-enable interrupts.
5209		 */
5210 #if 0
5211		ath_hal_intrset(ah, sc->sc_imask);
5212 #endif
5213	}
5214
5215 finish:
5216	ATH_PCU_LOCK(sc);
5217	sc->sc_inreset_cnt--;
5218	/* XXX only do this if sc_inreset_cnt == 0? */
5219	ath_hal_intrset(ah, sc->sc_imask);
5220	ATH_PCU_UNLOCK(sc);
5221
5222	ath_txrx_start(sc);
5223	/* XXX ath_start? */
5224
5225	return ret;
5226 }
5227
5228 /*
5229  * Periodically recalibrate the PHY to account
5230  * for temperature/environment changes.
5231  */
5232 static void
5233 ath_calibrate(void *arg)
5234 {
5235	struct ath_softc *sc = arg;
5236	struct ath_hal *ah = sc->sc_ah;
5237	struct ieee80211com *ic = &sc->sc_ic;
5238	HAL_BOOL longCal, isCalDone = AH_TRUE;
5239	HAL_BOOL aniCal, shortCal = AH_FALSE;
5240	int nextcal;
5241
5242	ATH_LOCK_ASSERT(sc);
5243
5244	/*
5245	 * Force the hardware awake for ANI work.
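	 *
	 * Interval arithmetic used below (illustrative): ath_longcalinterval
	 * is in seconds (ticks = interval * hz) while the short and ANI
	 * intervals are in milliseconds (ticks = interval * hz / 1000);
	 * e.g. with hz = 1000, a 30 s long cal is 30000 ticks and a
	 * 100 ms short cal is 100 ticks.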
5246 */ 5247 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5248 5249 /* Skip trying to do this if we're in reset */ 5250 if (sc->sc_inreset_cnt) 5251 goto restart; 5252 5253 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ 5254 goto restart; 5255 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); 5256 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); 5257 if (sc->sc_doresetcal) 5258 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); 5259 5260 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); 5261 if (aniCal) { 5262 sc->sc_stats.ast_ani_cal++; 5263 sc->sc_lastani = ticks; 5264 ath_hal_ani_poll(ah, sc->sc_curchan); 5265 } 5266 5267 if (longCal) { 5268 sc->sc_stats.ast_per_cal++; 5269 sc->sc_lastlongcal = ticks; 5270 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { 5271 /* 5272 * Rfgain is out of bounds, reset the chip 5273 * to load new gain values. 5274 */ 5275 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5276 "%s: rfgain change\n", __func__); 5277 sc->sc_stats.ast_per_rfgain++; 5278 sc->sc_resetcal = 0; 5279 sc->sc_doresetcal = AH_TRUE; 5280 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); 5281 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5282 ath_power_restore_power_state(sc); 5283 return; 5284 } 5285 /* 5286 * If this long cal is after an idle period, then 5287 * reset the data collection state so we start fresh. 5288 */ 5289 if (sc->sc_resetcal) { 5290 (void) ath_hal_calreset(ah, sc->sc_curchan); 5291 sc->sc_lastcalreset = ticks; 5292 sc->sc_lastshortcal = ticks; 5293 sc->sc_resetcal = 0; 5294 sc->sc_doresetcal = AH_TRUE; 5295 } 5296 } 5297 5298 /* Only call if we're doing a short/long cal, not for ANI calibration */ 5299 if (shortCal || longCal) { 5300 isCalDone = AH_FALSE; 5301 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { 5302 if (longCal) { 5303 /* 5304 * Calibrate noise floor data again in case of change. 5305 */ 5306 ath_hal_process_noisefloor(ah); 5307 } 5308 } else { 5309 DPRINTF(sc, ATH_DEBUG_ANY, 5310 "%s: calibration of channel %u failed\n", 5311 __func__, sc->sc_curchan->ic_freq); 5312 sc->sc_stats.ast_per_calfail++; 5313 } 5314 if (shortCal) 5315 sc->sc_lastshortcal = ticks; 5316 } 5317 if (!isCalDone) { 5318restart: 5319 /* 5320 * Use a shorter interval to potentially collect multiple 5321 * data samples required to complete calibration. Once 5322 * we're told the work is done we drop back to a longer 5323 * interval between requests. We're more aggressive doing 5324 * work when operating as an AP to improve operation right 5325 * after startup. 5326 */ 5327 sc->sc_lastshortcal = ticks; 5328 nextcal = ath_shortcalinterval*hz/1000; 5329 if (sc->sc_opmode != HAL_M_HOSTAP) 5330 nextcal *= 10; 5331 sc->sc_doresetcal = AH_TRUE; 5332 } else { 5333 /* nextcal should be the shortest time for next event */ 5334 nextcal = ath_longcalinterval*hz; 5335 if (sc->sc_lastcalreset == 0) 5336 sc->sc_lastcalreset = sc->sc_lastlongcal; 5337 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) 5338 sc->sc_resetcal = 1; /* setup reset next trip */ 5339 sc->sc_doresetcal = AH_FALSE; 5340 } 5341 /* ANI calibration may occur more often than short/long/resetcal */ 5342 if (ath_anicalinterval > 0) 5343 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); 5344 5345 if (nextcal != 0) { 5346 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", 5347 __func__, nextcal, isCalDone ? 
"" : "!"); 5348 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); 5349 } else { 5350 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", 5351 __func__); 5352 /* NB: don't rearm timer */ 5353 } 5354 /* 5355 * Restore power state now that we're done. 5356 */ 5357 ath_power_restore_power_state(sc); 5358} 5359 5360static void 5361ath_scan_start(struct ieee80211com *ic) 5362{ 5363 struct ath_softc *sc = ic->ic_softc; 5364 struct ath_hal *ah = sc->sc_ah; 5365 u_int32_t rfilt; 5366 5367 /* XXX calibration timer? */ 5368 /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */ 5369 5370 ATH_LOCK(sc); 5371 sc->sc_scanning = 1; 5372 sc->sc_syncbeacon = 0; 5373 rfilt = ath_calcrxfilter(sc); 5374 ATH_UNLOCK(sc); 5375 5376 ATH_PCU_LOCK(sc); 5377 ath_hal_setrxfilter(ah, rfilt); 5378 ath_hal_setassocid(ah, ieee80211broadcastaddr, 0); 5379 ATH_PCU_UNLOCK(sc); 5380 5381 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", 5382 __func__, rfilt, ether_sprintf(ieee80211broadcastaddr)); 5383} 5384 5385static void 5386ath_scan_end(struct ieee80211com *ic) 5387{ 5388 struct ath_softc *sc = ic->ic_softc; 5389 struct ath_hal *ah = sc->sc_ah; 5390 u_int32_t rfilt; 5391 5392 ATH_LOCK(sc); 5393 sc->sc_scanning = 0; 5394 rfilt = ath_calcrxfilter(sc); 5395 ATH_UNLOCK(sc); 5396 5397 ATH_PCU_LOCK(sc); 5398 ath_hal_setrxfilter(ah, rfilt); 5399 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5400 5401 ath_hal_process_noisefloor(ah); 5402 ATH_PCU_UNLOCK(sc); 5403 5404 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5405 __func__, rfilt, ether_sprintf(sc->sc_curbssid), 5406 sc->sc_curaid); 5407} 5408 5409#ifdef ATH_ENABLE_11N 5410/* 5411 * For now, just do a channel change. 5412 * 5413 * Later, we'll go through the hard slog of suspending tx/rx, changing rate 5414 * control state and resetting the hardware without dropping frames out 5415 * of the queue. 5416 * 5417 * The unfortunate trouble here is making absolutely sure that the 5418 * channel width change has propagated enough so the hardware 5419 * absolutely isn't handed bogus frames for it's current operating 5420 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and 5421 * does occur in parallel, we need to make certain we've blocked 5422 * any further ongoing TX (and RX, that can cause raw TX) 5423 * before we do this. 5424 */ 5425static void 5426ath_update_chw(struct ieee80211com *ic) 5427{ 5428 struct ath_softc *sc = ic->ic_softc; 5429 5430 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); 5431 ath_set_channel(ic); 5432} 5433#endif /* ATH_ENABLE_11N */ 5434 5435static void 5436ath_set_channel(struct ieee80211com *ic) 5437{ 5438 struct ath_softc *sc = ic->ic_softc; 5439 5440 ATH_LOCK(sc); 5441 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5442 ATH_UNLOCK(sc); 5443 5444 (void) ath_chan_set(sc, ic->ic_curchan); 5445 /* 5446 * If we are returning to our bss channel then mark state 5447 * so the next recv'd beacon's tsf will be used to sync the 5448 * beacon timers. Note that since we only hear beacons in 5449 * sta/ibss mode this has no effect in other operating modes. 5450 */ 5451 ATH_LOCK(sc); 5452 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) 5453 sc->sc_syncbeacon = 1; 5454 ath_power_restore_power_state(sc); 5455 ATH_UNLOCK(sc); 5456} 5457 5458/* 5459 * Walk the vap list and check if there any vap's in RUN state. 
5460 */ 5461static int 5462ath_isanyrunningvaps(struct ieee80211vap *this) 5463{ 5464 struct ieee80211com *ic = this->iv_ic; 5465 struct ieee80211vap *vap; 5466 5467 IEEE80211_LOCK_ASSERT(ic); 5468 5469 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { 5470 if (vap != this && vap->iv_state >= IEEE80211_S_RUN) 5471 return 1; 5472 } 5473 return 0; 5474} 5475 5476static int 5477ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 5478{ 5479 struct ieee80211com *ic = vap->iv_ic; 5480 struct ath_softc *sc = ic->ic_softc; 5481 struct ath_vap *avp = ATH_VAP(vap); 5482 struct ath_hal *ah = sc->sc_ah; 5483 struct ieee80211_node *ni = NULL; 5484 int i, error, stamode; 5485 u_int32_t rfilt; 5486 int csa_run_transition = 0; 5487 enum ieee80211_state ostate = vap->iv_state; 5488 5489 static const HAL_LED_STATE leds[] = { 5490 HAL_LED_INIT, /* IEEE80211_S_INIT */ 5491 HAL_LED_SCAN, /* IEEE80211_S_SCAN */ 5492 HAL_LED_AUTH, /* IEEE80211_S_AUTH */ 5493 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ 5494 HAL_LED_RUN, /* IEEE80211_S_CAC */ 5495 HAL_LED_RUN, /* IEEE80211_S_RUN */ 5496 HAL_LED_RUN, /* IEEE80211_S_CSA */ 5497 HAL_LED_RUN, /* IEEE80211_S_SLEEP */ 5498 }; 5499 5500 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, 5501 ieee80211_state_name[ostate], 5502 ieee80211_state_name[nstate]); 5503 5504 /* 5505 * net80211 _should_ have the comlock asserted at this point. 5506 * There are some comments around the calls to vap->iv_newstate 5507 * which indicate that it (newstate) may end up dropping the 5508 * lock. This and the subsequent lock assert check after newstate 5509 * are an attempt to catch these and figure out how/why. 5510 */ 5511 IEEE80211_LOCK_ASSERT(ic); 5512 5513 /* Before we touch the hardware - wake it up */ 5514 ATH_LOCK(sc); 5515 /* 5516 * If the NIC is in anything other than SLEEP state, 5517 * we need to ensure that self-generated frames are 5518 * set for PWRMGT=0. Otherwise we may end up with 5519 * strange situations. 5520 * 5521 * XXX TODO: is this actually the case? :-) 5522 */ 5523 if (nstate != IEEE80211_S_SLEEP) 5524 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5525 5526 /* 5527 * Now, wake the thing up. 5528 */ 5529 ath_power_set_power_state(sc, HAL_PM_AWAKE); 5530 5531 /* 5532 * And stop the calibration callout whilst we have 5533 * ATH_LOCK held. 5534 */ 5535 callout_stop(&sc->sc_cal_ch); 5536 ATH_UNLOCK(sc); 5537 5538 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) 5539 csa_run_transition = 1; 5540 5541 ath_hal_setledstate(ah, leds[nstate]); /* set LED */ 5542 5543 if (nstate == IEEE80211_S_SCAN) { 5544 /* 5545 * Scanning: turn off beacon miss and don't beacon. 5546 * Mark beacon state so when we reach RUN state we'll 5547 * [re]setup beacons. Unblock the task q thread so 5548 * deferred interrupt processing is done. 5549 */ 5550 5551 /* Ensure we stay awake during scan */ 5552 ATH_LOCK(sc); 5553 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5554 ath_power_setpower(sc, HAL_PM_AWAKE); 5555 ATH_UNLOCK(sc); 5556 5557 ath_hal_intrset(ah, 5558 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); 5559 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5560 sc->sc_beacons = 0; 5561 taskqueue_unblock(sc->sc_tq); 5562 } 5563 5564 ni = ieee80211_ref_node(vap->iv_bss); 5565 rfilt = ath_calcrxfilter(sc); 5566 stamode = (vap->iv_opmode == IEEE80211_M_STA || 5567 vap->iv_opmode == IEEE80211_M_AHDEMO || 5568 vap->iv_opmode == IEEE80211_M_IBSS); 5569 5570 /* 5571 * XXX Dont need to do this (and others) if we've transitioned 5572 * from SLEEP->RUN. 
5573 */ 5574 if (stamode && nstate == IEEE80211_S_RUN) { 5575 sc->sc_curaid = ni->ni_associd; 5576 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); 5577 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); 5578 } 5579 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", 5580 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); 5581 ath_hal_setrxfilter(ah, rfilt); 5582 5583 /* XXX is this to restore keycache on resume? */ 5584 if (vap->iv_opmode != IEEE80211_M_STA && 5585 (vap->iv_flags & IEEE80211_F_PRIVACY)) { 5586 for (i = 0; i < IEEE80211_WEP_NKID; i++) 5587 if (ath_hal_keyisvalid(ah, i)) 5588 ath_hal_keysetmac(ah, i, ni->ni_bssid); 5589 } 5590 5591 /* 5592 * Invoke the parent method to do net80211 work. 5593 */ 5594 error = avp->av_newstate(vap, nstate, arg); 5595 if (error != 0) 5596 goto bad; 5597 5598 /* 5599 * See above: ensure av_newstate() doesn't drop the lock 5600 * on us. 5601 */ 5602 IEEE80211_LOCK_ASSERT(ic); 5603 5604 if (nstate == IEEE80211_S_RUN) { 5605 /* NB: collect bss node again, it may have changed */ 5606 ieee80211_free_node(ni); 5607 ni = ieee80211_ref_node(vap->iv_bss); 5608 5609 DPRINTF(sc, ATH_DEBUG_STATE, 5610 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " 5611 "capinfo 0x%04x chan %d\n", __func__, 5612 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), 5613 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); 5614 5615 switch (vap->iv_opmode) { 5616#ifdef IEEE80211_SUPPORT_TDMA 5617 case IEEE80211_M_AHDEMO: 5618 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) 5619 break; 5620 /* fall thru... */ 5621#endif 5622 case IEEE80211_M_HOSTAP: 5623 case IEEE80211_M_IBSS: 5624 case IEEE80211_M_MBSS: 5625 /* 5626 * Allocate and setup the beacon frame. 5627 * 5628 * Stop any previous beacon DMA. This may be 5629 * necessary, for example, when an ibss merge 5630 * causes reconfiguration; there will be a state 5631 * transition from RUN->RUN that means we may 5632 * be called with beacon transmission active. 5633 */ 5634 ath_hal_stoptxdma(ah, sc->sc_bhalq); 5635 5636 error = ath_beacon_alloc(sc, ni); 5637 if (error != 0) 5638 goto bad; 5639 /* 5640 * If joining an adhoc network defer beacon timer 5641 * configuration to the next beacon frame so we 5642 * have a current TSF to use. Otherwise we're 5643 * starting an ibss/bss so there's no need to delay; 5644 * if this is the first vap moving to RUN state, then 5645 * beacon state needs to be [re]configured. 5646 */ 5647 if (vap->iv_opmode == IEEE80211_M_IBSS && 5648 ni->ni_tstamp.tsf != 0) { 5649 sc->sc_syncbeacon = 1; 5650 } else if (!sc->sc_beacons) { 5651#ifdef IEEE80211_SUPPORT_TDMA 5652 if (vap->iv_caps & IEEE80211_C_TDMA) 5653 ath_tdma_config(sc, vap); 5654 else 5655#endif 5656 ath_beacon_config(sc, vap); 5657 sc->sc_beacons = 1; 5658 } 5659 break; 5660 case IEEE80211_M_STA: 5661 /* 5662 * Defer beacon timer configuration to the next 5663 * beacon frame so we have a current TSF to use 5664 * (any TSF collected when scanning is likely old). 5665 * However if it's due to a CSA -> RUN transition, 5666 * force a beacon update so we pick up a lack of 5667 * beacons from an AP in CAC and thus force a 5668 * scan. 5669 * 5670 * And, there's also corner cases here where 5671 * after a scan, the AP may have disappeared. 5672 * In that case, we may not receive an actual 5673 * beacon to update the beacon timer and thus we 5674 * won't get notified of the missing beacons. 
5675 */ 5676 if (ostate != IEEE80211_S_RUN && 5677 ostate != IEEE80211_S_SLEEP) { 5678 DPRINTF(sc, ATH_DEBUG_BEACON, 5679 "%s: STA; syncbeacon=1\n", __func__); 5680 sc->sc_syncbeacon = 1; 5681 5682 if (csa_run_transition) 5683 ath_beacon_config(sc, vap); 5684 5685 /* 5686 * PR: kern/175227 5687 * 5688 * Reconfigure beacons during reset; as otherwise 5689 * we won't get the beacon timers reprogrammed 5690 * after a reset and thus we won't pick up a 5691 * beacon miss interrupt. 5692 * 5693 * Hopefully we'll see a beacon before the BMISS 5694 * timer fires (too often), leading to a STA 5695 * disassociation. 5696 */ 5697 sc->sc_beacons = 1; 5698 } 5699 break; 5700 case IEEE80211_M_MONITOR: 5701 /* 5702 * Monitor mode vaps have only INIT->RUN and RUN->RUN 5703 * transitions so we must re-enable interrupts here to 5704 * handle the case of a single monitor mode vap. 5705 */ 5706 ath_hal_intrset(ah, sc->sc_imask); 5707 break; 5708 case IEEE80211_M_WDS: 5709 break; 5710 default: 5711 break; 5712 } 5713 /* 5714 * Let the hal process statistics collected during a 5715 * scan so it can provide calibrated noise floor data. 5716 */ 5717 ath_hal_process_noisefloor(ah); 5718 /* 5719 * Reset rssi stats; maybe not the best place... 5720 */ 5721 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; 5722 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; 5723 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 5724 5725 /* 5726 * Force awake for RUN mode. 5727 */ 5728 ATH_LOCK(sc); 5729 ath_power_setselfgen(sc, HAL_PM_AWAKE); 5730 ath_power_setpower(sc, HAL_PM_AWAKE); 5731 5732 /* 5733 * Finally, start any timers and the task q thread 5734 * (in case we didn't go through SCAN state). 5735 */ 5736 if (ath_longcalinterval != 0) { 5737 /* start periodic recalibration timer */ 5738 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); 5739 } else { 5740 DPRINTF(sc, ATH_DEBUG_CALIBRATE, 5741 "%s: calibration disabled\n", __func__); 5742 } 5743 ATH_UNLOCK(sc); 5744 5745 taskqueue_unblock(sc->sc_tq); 5746 } else if (nstate == IEEE80211_S_INIT) { 5747 /* 5748 * If there are no vaps left in RUN state then 5749 * shutdown host/driver operation: 5750 * o disable interrupts 5751 * o disable the task queue thread 5752 * o mark beacon processing as stopped 5753 */ 5754 if (!ath_isanyrunningvaps(vap)) { 5755 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); 5756 /* disable interrupts */ 5757 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); 5758 taskqueue_block(sc->sc_tq); 5759 sc->sc_beacons = 0; 5760 } 5761#ifdef IEEE80211_SUPPORT_TDMA 5762 ath_hal_setcca(ah, AH_TRUE); 5763#endif 5764 } else if (nstate == IEEE80211_S_SLEEP) { 5765 /* We're going to sleep, so transition appropriately */ 5766 /* For now, only do this if we're a single STA vap */ 5767 if (sc->sc_nvaps == 1 && 5768 vap->iv_opmode == IEEE80211_M_STA) { 5769 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon); 5770 ATH_LOCK(sc); 5771 /* 5772 * Always at least set the self-generated 5773 * frame config to set PWRMGT=1. 5774 */ 5775 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); 5776 5777 /* 5778 * If we're not syncing beacons, transition 5779 * to NETWORK_SLEEP. 5780 * 5781 * We stay awake if syncbeacon > 0 in case 5782 * we need to listen for some beacons otherwise 5783 * our beacon timer config may be wrong. 
5784 */ 5785 if (sc->sc_syncbeacon == 0) { 5786 ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP); 5787 } 5788 ATH_UNLOCK(sc); 5789 } 5790 } 5791bad: 5792 ieee80211_free_node(ni); 5793 5794 /* 5795 * Restore the power state - either to what it was, or 5796 * to network_sleep if it's alright. 5797 */ 5798 ATH_LOCK(sc); 5799 ath_power_restore_power_state(sc); 5800 ATH_UNLOCK(sc); 5801 return error; 5802} 5803 5804/* 5805 * Allocate a key cache slot to the station so we can 5806 * setup a mapping from key index to node. The key cache 5807 * slot is needed for managing antenna state and for 5808 * compression when stations do not use crypto. We do 5809 * it uniliaterally here; if crypto is employed this slot 5810 * will be reassigned. 5811 */ 5812static void 5813ath_setup_stationkey(struct ieee80211_node *ni) 5814{ 5815 struct ieee80211vap *vap = ni->ni_vap; 5816 struct ath_softc *sc = vap->iv_ic->ic_softc; 5817 ieee80211_keyix keyix, rxkeyix; 5818 5819 /* XXX should take a locked ref to vap->iv_bss */ 5820 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { 5821 /* 5822 * Key cache is full; we'll fall back to doing 5823 * the more expensive lookup in software. Note 5824 * this also means no h/w compression. 5825 */ 5826 /* XXX msg+statistic */ 5827 } else { 5828 /* XXX locking? */ 5829 ni->ni_ucastkey.wk_keyix = keyix; 5830 ni->ni_ucastkey.wk_rxkeyix = rxkeyix; 5831 /* NB: must mark device key to get called back on delete */ 5832 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; 5833 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); 5834 /* NB: this will create a pass-thru key entry */ 5835 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); 5836 } 5837} 5838 5839/* 5840 * Setup driver-specific state for a newly associated node. 5841 * Note that we're called also on a re-associate, the isnew 5842 * param tells us if this is the first time or not. 5843 */ 5844static void 5845ath_newassoc(struct ieee80211_node *ni, int isnew) 5846{ 5847 struct ath_node *an = ATH_NODE(ni); 5848 struct ieee80211vap *vap = ni->ni_vap; 5849 struct ath_softc *sc = vap->iv_ic->ic_softc; 5850 const struct ieee80211_txparam *tp = ni->ni_txparms; 5851 5852 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); 5853 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); 5854 5855 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n", 5856 __func__, 5857 ni->ni_macaddr, 5858 ":", 5859 isnew, 5860 an->an_is_powersave); 5861 5862 ATH_NODE_LOCK(an); 5863 ath_rate_newassoc(sc, an, isnew); 5864 ATH_NODE_UNLOCK(an); 5865 5866 if (isnew && 5867 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && 5868 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) 5869 ath_setup_stationkey(ni); 5870 5871 /* 5872 * If we're reassociating, make sure that any paused queues 5873 * get unpaused. 5874 * 5875 * Now, we may have frames in the hardware queue for this node. 5876 * So if we are reassociating and there are frames in the queue, 5877 * we need to go through the cleanup path to ensure that they're 5878 * marked as non-aggregate. 5879 */ 5880 if (! 
isnew) { 5881 DPRINTF(sc, ATH_DEBUG_NODE, 5882 "%s: %6D: reassoc; is_powersave=%d\n", 5883 __func__, 5884 ni->ni_macaddr, 5885 ":", 5886 an->an_is_powersave); 5887 5888 /* XXX for now, we can't hold the lock across assoc */ 5889 ath_tx_node_reassoc(sc, an); 5890 5891 /* XXX for now, we can't hold the lock across wakeup */ 5892 if (an->an_is_powersave) 5893 ath_tx_node_wakeup(sc, an); 5894 } 5895} 5896 5897static int 5898ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, 5899 int nchans, struct ieee80211_channel chans[]) 5900{ 5901 struct ath_softc *sc = ic->ic_softc; 5902 struct ath_hal *ah = sc->sc_ah; 5903 HAL_STATUS status; 5904 5905 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 5906 "%s: rd %u cc %u location %c%s\n", 5907 __func__, reg->regdomain, reg->country, reg->location, 5908 reg->ecm ? " ecm" : ""); 5909 5910 status = ath_hal_set_channels(ah, chans, nchans, 5911 reg->country, reg->regdomain); 5912 if (status != HAL_OK) { 5913 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", 5914 __func__, status); 5915 return EINVAL; /* XXX */ 5916 } 5917 5918 return 0; 5919} 5920 5921static void 5922ath_getradiocaps(struct ieee80211com *ic, 5923 int maxchans, int *nchans, struct ieee80211_channel chans[]) 5924{ 5925 struct ath_softc *sc = ic->ic_softc; 5926 struct ath_hal *ah = sc->sc_ah; 5927 5928 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", 5929 __func__, SKU_DEBUG, CTRY_DEFAULT); 5930 5931 /* XXX check return */ 5932 (void) ath_hal_getchannels(ah, chans, maxchans, nchans, 5933 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); 5934 5935} 5936 5937static int 5938ath_getchannels(struct ath_softc *sc) 5939{ 5940 struct ieee80211com *ic = &sc->sc_ic; 5941 struct ath_hal *ah = sc->sc_ah; 5942 HAL_STATUS status; 5943 5944 /* 5945 * Collect channel set based on EEPROM contents. 5946 */ 5947 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, 5948 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); 5949 if (status != HAL_OK) { 5950 device_printf(sc->sc_dev, 5951 "%s: unable to collect channel list from hal, status %d\n", 5952 __func__, status); 5953 return EINVAL; 5954 } 5955 (void) ath_hal_getregdomain(ah, &sc->sc_eerd); 5956 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ 5957 /* XXX map Atheros sku's to net80211 SKU's */ 5958 /* XXX net80211 types too small */ 5959 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; 5960 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; 5961 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ 5962 ic->ic_regdomain.isocc[1] = ' '; 5963 5964 ic->ic_regdomain.ecm = 1; 5965 ic->ic_regdomain.location = 'I'; 5966 5967 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, 5968 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", 5969 __func__, sc->sc_eerd, sc->sc_eecc, 5970 ic->ic_regdomain.regdomain, ic->ic_regdomain.country, 5971 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 

static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt;

	switch (mode) {
	case IEEE80211_MODE_11A:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
		break;
	case IEEE80211_MODE_HALF:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
		break;
	case IEEE80211_MODE_QUARTER:
		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
		break;
	case IEEE80211_MODE_11B:
		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
		break;
	case IEEE80211_MODE_11G:
		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
		break;
	case IEEE80211_MODE_TURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
		break;
	case IEEE80211_MODE_TURBO_G:
		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
		break;
	case IEEE80211_MODE_STURBO_A:
		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
		break;
	case IEEE80211_MODE_11NA:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
		break;
	case IEEE80211_MODE_11NG:
		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
		break;
	default:
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
		    __func__, mode);
		return 0;
	}
	sc->sc_rates[mode] = rt;
	return (rt != NULL);
}

static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < nitems(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		for (j = 0; j < nitems(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
}
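
/*
 * Illustrative sketch (not compiled): 802.11 rates here, including
 * the blinkrates[] entries and the ath_tx_findrix() arguments above,
 * are expressed in units of 500kb/s (e.g. 108 == 54Mb/s, 2*2 == 2Mb/s,
 * 2*1 == 1Mb/s).  A hypothetical helper making the encoding explicit:
 */
#if 0
static __inline u_int
ath_mbps_to_rate(u_int mbps)
{

	return (2 * mbps);	/* N Mb/s -> N * 2 half-Mb/s units */
}
#endif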

static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int do_reset = 0;

	ATH_LOCK_ASSERT(sc);

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		uint32_t hangs;

		ath_power_set_power_state(sc, HAL_PM_AWAKE);

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			device_printf(sc->sc_dev, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			device_printf(sc->sc_dev, "device timeout\n");
		do_reset = 1;
		counter_u64_add(ic->ic_oerrors, 1);
		sc->sc_stats.ast_watchdog++;

		ath_power_restore_power_state(sc);
	}

	/*
	 * We can't hold the lock across the ath_reset() call.
	 *
	 * And since this routine can't hold a lock and sleep,
	 * do the reset deferred.
	 */
	if (do_reset) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
	}

	callout_schedule(&sc->sc_wd_ch, hz);
}
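
/*
 * Illustrative sketch (not compiled): the watchdog above is a plain
 * countdown, decremented once a second by the callout.  Elsewhere in
 * this driver the TX path arms it while frames are outstanding and
 * the completion path disarms it once the hardware queues drain,
 * along these lines (the exact window shown is illustrative):
 */
#if 0
	sc->sc_wd_timer = 5;	/* arm: frames pending on the h/w queues */
	/* ... later, when TX completion finds the queues empty ... */
	sc->sc_wd_timer = 0;	/* disarm: nothing left to time out */
#endif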

static void
ath_parent(struct ieee80211com *ic)
{
	struct ath_softc *sc = ic->ic_softc;
	int error = EDOOFUS;

	ATH_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		/*
		 * To avoid rescanning another access point,
		 * do not call ath_init() here.  Instead,
		 * only reflect promisc mode settings.
		 */
		if (sc->sc_running) {
			ath_power_set_power_state(sc, HAL_PM_AWAKE);
			ath_mode_init(sc);
			ath_power_restore_power_state(sc);
		} else if (!sc->sc_invalid) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			error = ath_init(sc);
		}
	} else {
		ath_stop(sc);
		if (!sc->sc_invalid)
			ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
	}
	ATH_UNLOCK(sc);

	if (error == 0) {
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->start(sc->sc_tx99);
		else
#endif
		ieee80211_start_all(ic);
	}
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n",
	    ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
	    ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
	    ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
	if (bootverbose) {
		int i;
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			device_printf(sc->sc_dev,
			    "Use hw queue %u for %s traffic\n",
			    txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n",
		    sc->sc_cabq->axq_qnum);
		device_printf(sc->sc_dev, "Use hw queue %u for beacons\n",
		    sc->sc_bhalq);
	}
	if (ath_rxbuf != ATH_RXBUF)
		device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf);
	if (ath_txbuf != ATH_TXBUF)
		device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf);
	if (sc->sc_mcastkey && bootverbose)
		device_printf(sc->sc_dev, "using multicast key search\n");
}

static void
ath_dfs_tasklet(void *p, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) p;
	struct ieee80211com *ic = &sc->sc_ic;

	/*
	 * If previous processing has found a radar event,
	 * signal this to the net80211 layer to begin DFS
	 * processing.
	 */
	if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
		/* DFS event found, initiate channel change */
		/*
		 * XXX doesn't currently tell us whether the event
		 * XXX was found in the primary or extension
		 * XXX channel!
		 */
		IEEE80211_LOCK(ic);
		ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
		IEEE80211_UNLOCK(ic);
	}
}

/*
 * Enable/disable power save.  This must be called with
 * no TX driver locks currently held, so it should only
 * be called from the RX path (which doesn't hold any
 * TX driver locks.)
 */
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
#ifdef	ATH_SW_PSQ
	struct ath_node *an = ATH_NODE(ni);
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* XXX and no TXQ locks should be held here */

	DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
	    __func__,
	    ni->ni_macaddr,
	    ":",
	    !! enable);

	/* Suspend or resume software queue handling */
	if (enable)
		ath_tx_node_sleep(sc, an);
	else
		ath_tx_node_wakeup(sc, an);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/* Update net80211 state */
	avp->av_node_ps(ni, enable);
#endif /* ATH_SW_PSQ */
}
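
/*
 * Illustrative sketch (not compiled): av_node_ps is the saved
 * net80211 method; ath_vap_create() interposes the driver version
 * roughly like this, so the driver can pause/resume its software
 * queues before net80211 sees the powersave transition.
 */
#if 0
	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;
#endif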

/*
 * Notification from net80211 that the powersave queue state has
 * changed.
 *
 * Since the software queue also may have some frames:
 *
 * + if the node software queue has frames and the TID state
 *   is 0, we set the TIM;
 * + if the node and the stack are both empty, we clear the TIM bit;
 * + if the stack tries to set the bit, always set it;
 * + if the stack tries to clear the bit, only clear it if the
 *   software queue in question is also cleared.
 *
 * TODO: this is called during node teardown; so let's ensure this
 * is all correctly handled and that the TIM bit is cleared.
 * It may be that the node flush is called _AFTER_ the net80211
 * stack clears the TIM.
 *
 * Here is the racy part.  Since it's possible that more than one
 * concurrent, overlapping TX will appear complete, each with a TX
 * completion in another thread, it's possible that the concurrent
 * TIM calls will clash.  We can't hold the node lock here because
 * setting the TIM grabs the net80211 comlock and this may cause
 * a LOR.  The solution is either to totally serialise _everything_
 * at this point (i.e., all TX, completion and any reset/flush go
 * into one taskqueue) or a new "ath TIM lock" needs to be created
 * that just wraps the driver state change and this call to
 * avp->av_set_tim().
 *
 * The same race exists in the net80211 power save queue handling
 * as well.  Since multiple transmitting threads may queue frames
 * into the driver, as well as ps-poll and the driver transmitting
 * frames (and thus clearing the psq), it's quite possible that
 * a packet entering the PSQ and a ps-poll being handled will
 * race, causing the TIM to be cleared and not re-set.
 */
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
#ifdef	ATH_SW_PSQ
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_softc;
	struct ath_node *an = ATH_NODE(ni);
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
	int changed = 0;

	ATH_TX_LOCK(sc);
	an->an_stack_psq = enable;

	/*
	 * This will get called for all operating modes,
	 * even if avp->av_set_tim is unset.
	 * It's currently set for hostap/ibss modes; but
	 * the same infrastructure is used for both STA
	 * and AP/IBSS node power save.
	 */
	if (avp->av_set_tim == NULL) {
		ATH_TX_UNLOCK(sc);
		return (0);
	}

	/*
	 * If setting the bit, always set it here.
	 * If clearing the bit, only clear it if the
	 * software queue is also empty.
	 *
	 * If the node has left power save, just clear the TIM
	 * bit regardless of the state of the power save queue.
	 *
	 * XXX TODO: although atomics are used, it's quite possible
	 * that a race will occur between this and setting/clearing
	 * in another thread.  TX completion will occur always in
	 * one thread, however setting/clearing the TIM bit can come
	 * from a variety of different process contexts!
	 */
	if (enable && an->an_tim_set == 1) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, tim_set=1, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		ATH_TX_UNLOCK(sc);
	} else if (enable) {
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, enabling TIM\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 1;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (an->an_swq_depth == 0) {
		/* disable */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else if (! an->an_is_powersave) {
		/*
		 * disable regardless; the node isn't in powersave now
		 */
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		an->an_tim_set = 0;
		ATH_TX_UNLOCK(sc);
		changed = avp->av_set_tim(ni, enable);
	} else {
		/*
		 * psq disable, node is currently in powersave, node
		 * software queue isn't empty, so don't clear the TIM bit
		 * for now.
		 */
		ATH_TX_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
		    "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
		    __func__,
		    ni->ni_macaddr,
		    ":",
		    enable);
		changed = 0;
	}

	return (changed);
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	/*
	 * Some operating modes don't set av_set_tim(), so don't
	 * update it here.
	 */
	if (avp->av_set_tim == NULL)
		return (0);

	return (avp->av_set_tim(ni, enable));
#endif /* ATH_SW_PSQ */
}
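
/*
 * Illustrative sketch (not compiled): the branches above reduce to a
 * single invariant - the TIM should be set while either net80211's
 * psq or the driver software queue still holds frames for a node in
 * powersave.  A hypothetical predicate capturing that:
 */
#if 0
static int
ath_node_tim_wanted(const struct ath_node *an)
{

	return (an->an_is_powersave &&
	    (an->an_stack_psq || an->an_swq_depth != 0));
}
#endif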

/*
 * Set or update the TIM from the software queue.
 *
 * Check the software queue depth before attempting to take
 * any locks; that avoids acquiring the lock needlessly.  Then,
 * re-check afterwards to ensure nothing has changed in the
 * meantime.
 *
 * set:   This is designed to be called from the TX path, after
 *        a frame has been queued; to see if the swq > 0.
 *
 * clear: This is designed to be called from the buffer completion point
 *        (right now it's ath_tx_default_comp()) where the state of
 *        a software queue has changed.
 *
 * It makes sense to place it at buffer free / completion rather
 * than after each software queue operation, as there's no real
 * point in churning the TIM bit as the last frames in the software
 * queue are transmitted.  If they fail and we retry them, we'd
 * just be setting the TIM bit again anyway.
 */
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
    int enable)
{
#ifdef	ATH_SW_PSQ
	struct ath_node *an;
	struct ath_vap *avp;

	/* Don't do this for broadcast/etc frames */
	if (ni == NULL)
		return;

	an = ATH_NODE(ni);
	avp = ATH_VAP(ni->ni_vap);

	/*
	 * And for operating modes without the TIM handler set, let's
	 * just skip those.
	 */
	if (avp->av_set_tim == NULL)
		return;

	ATH_TX_LOCK_ASSERT(sc);

	if (enable) {
		if (an->an_is_powersave &&
		    an->an_tim_set == 0 &&
		    an->an_swq_depth != 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth>0, tim_set=0, set!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
			an->an_tim_set = 1;
			(void) avp->av_set_tim(ni, 1);
		}
	} else {
		/*
		 * Don't bother grabbing the lock unless the queue is empty.
		 */
		if (an->an_swq_depth != 0)
			return;

		if (an->an_is_powersave &&
		    an->an_stack_psq == 0 &&
		    an->an_tim_set == 1 &&
		    an->an_swq_depth == 0) {
			DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
			    "%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
			    " clear!\n",
			    __func__,
			    ni->ni_macaddr,
			    ":");
			an->an_tim_set = 0;
			(void) avp->av_set_tim(ni, 0);
		}
	}
#else
	return;
#endif /* ATH_SW_PSQ */
}
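
/*
 * Illustrative sketch (not compiled): the expected call pattern, with
 * the TX lock held.  ath_tx_default_comp() is the completion-side
 * caller named above; the queue/dequeue steps here are schematic.
 */
#if 0
	ATH_TX_LOCK(sc);
	/* ... a frame is placed on the per-node software queue ... */
	ath_tx_update_tim(sc, ni, 1);	/* set: swq just became non-empty */
	ATH_TX_UNLOCK(sc);

	ATH_TX_LOCK(sc);
	/* ... completion removes the last frame from the swq ... */
	ath_tx_update_tim(sc, ni, 0);	/* clear: swq may now be empty */
	ATH_TX_UNLOCK(sc);
#endif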
6627 */ 6628 ATH_TX_UNLOCK(sc); 6629 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); 6630 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6631 "%s: %6D: leaking frame to TID %d\n", 6632 __func__, 6633 ni->ni_macaddr, 6634 ":", 6635 tid); 6636 return; 6637 } 6638 6639 ATH_TX_UNLOCK(sc); 6640 6641 /* 6642 * XXX nothing in the TIDs at this point? Eek. 6643 */ 6644 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, 6645 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", 6646 __func__, 6647 ni->ni_macaddr, 6648 ":"); 6649 avp->av_recv_pspoll(ni, m); 6650#else 6651 avp->av_recv_pspoll(ni, m); 6652#endif /* ATH_SW_PSQ */ 6653} 6654 6655MODULE_VERSION(if_ath, 1); 6656MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ 6657#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ) 6658MODULE_DEPEND(if_ath, alq, 1, 1, 1); 6659#endif 6660