/* if_ath.c — FreeBSD head/sys/dev/ath, revision 222497 */
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 *    redistribution must be conditioned upon including a substantially
14 *    similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 222497 2011-05-30 14:57:00Z adrian $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60#include <sys/priv.h>
61#include <sys/module.h>
62
63#include <machine/bus.h>
64
65#include <net/if.h>
66#include <net/if_dl.h>
67#include <net/if_media.h>
68#include <net/if_types.h>
69#include <net/if_arp.h>
70#include <net/ethernet.h>
71#include <net/if_llc.h>
72
73#include <net80211/ieee80211_var.h>
74#include <net80211/ieee80211_regdomain.h>
75#ifdef IEEE80211_SUPPORT_SUPERG
76#include <net80211/ieee80211_superg.h>
77#endif
78#ifdef IEEE80211_SUPPORT_TDMA
79#include <net80211/ieee80211_tdma.h>
80#endif
81
82#include <net/bpf.h>
83
84#ifdef INET
85#include <netinet/in.h>
86#include <netinet/if_ether.h>
87#endif
88
89#include <dev/ath/if_athvar.h>
90#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
91#include <dev/ath/ath_hal/ah_diagcodes.h>
92
93#include <dev/ath/if_ath_debug.h>
94#include <dev/ath/if_ath_misc.h>
95#include <dev/ath/if_ath_tx.h>
96#include <dev/ath/if_ath_sysctl.h>
97#include <dev/ath/if_ath_keycache.h>
98
99#ifdef ATH_TX99_DIAG
100#include <dev/ath/ath_tx99/ath_tx99.h>
101#endif
102
103/*
104 * ATH_BCBUF determines the number of vap's that can transmit
105 * beacons and also (currently) the number of vap's that can
106 * have unique mac addresses/bssid.  When staggering beacons
107 * 4 is probably a good max as otherwise the beacons become
108 * very closely spaced and there is limited time for cab q traffic
109 * to go out.  You can burst beacons instead but that is not good
110 * for stations in power save and at some point you really want
111 * another radio (and channel).
112 *
113 * The limit on the number of mac addresses is tied to our use of
114 * the U/L bit and tracking addresses in a byte; it would be
115 * worthwhile to allow more for applications like proxy sta.
116 */
117CTASSERT(ATH_BCBUF <= 8);
118
/* Forward declarations for the driver-private (file-static) routines. */

/* VAP lifecycle and interface entry points. */
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char name[IFNAMSIZ], int unit, int opmode,
		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
		    const uint8_t mac[IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static void	ath_start(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
/* Key-cache serialization and rx-filter state. */
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_mode_init(struct ath_softc *);
static void	ath_setslottime(struct ath_softc *);
static void	ath_updateslot(struct ifnet *);
/* Beacon setup, generation and teardown. */
static int	ath_beaconq_setup(struct ath_hal *);
static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
static void	ath_beacon_update(struct ieee80211vap *, int item);
static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_proc(void *, int);
static struct ath_buf *ath_beacon_generate(struct ath_softc *,
			struct ieee80211vap *);
static void	ath_bstuck_proc(void *, int);
static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
static void	ath_beacon_free(struct ath_softc *);
static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
/* Descriptor DMA memory and node management. */
static void	ath_descdma_cleanup(struct ath_softc *sc,
			struct ath_descdma *, ath_bufhead *);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
/* Receive path. */
static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
			int subtype, int rssi, int nf);
static void	ath_setdefantenna(struct ath_softc *, u_int);
static void	ath_rx_proc(void *, int);
/* Transmit queue setup and completion processing. */
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static int	ath_wme_update(struct ieee80211com *);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
/* Channel/state changes, calibration, regulatory. */
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_draintxq(struct ath_softc *);
static void	ath_stoprecv(struct ath_softc *);
static int	ath_startrecv(struct ath_softc *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);
static void	ath_led_event(struct ath_softc *, int);

/* Rate table setup. */
static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);
201
#ifdef IEEE80211_SUPPORT_TDMA
/* TDMA (slotted ad-hoc) support; compiled in only with IEEE80211_SUPPORT_TDMA. */
static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
		    u_int32_t bintval);
static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
		    const struct ieee80211_tdma_state *tdma);
static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
static void	ath_tdma_update(struct ieee80211_node *ni,
		    const struct ieee80211_tdma_param *tdma, int);
static void	ath_tdma_beacon_send(struct ath_softc *sc,
		    struct ieee80211vap *vap);
212
/*
 * Enable/disable CCA (clear channel assessment) on the HAL.
 * Deliberately a no-op in the stock driver: see the NB below.
 */
static __inline void
ath_hal_setcca(struct ath_hal *ah, int ena)
{
	/*
	 * NB: fill me in; this is not provided by default because disabling
	 *     CCA in most locales violates regulatory.
	 */
}
221
222static __inline int
223ath_hal_getcca(struct ath_hal *ah)
224{
225	u_int32_t diag;
226	if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
227		return 1;
228	return ((diag & 0x500000) == 0);
229}
230
/*
 * Fixed-point helpers for TDMA slot-timing averages.
 * Values are scaled by TDMA_EP_MULTIPLIER (a power of two so the
 * compiler turns * and / into shifts) and smoothed with a simple
 * low-pass filter of length TDMA_LPF_LEN.  TDMA_DUMMY_MARKER is the
 * "no sample yet" sentinel: the first real sample replaces it outright.
 */
#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
/* Convert a raw sample into the scaled (extended-precision) domain. */
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
/*
 * Low-pass filter: weighted average of the running value x and the new
 * sample y, unless x is still the dummy marker in which case y is taken
 * as-is.  NB: every macro argument is parenthesized to avoid expansion
 * surprises (the original left the first use of x bare).
 */
#define	TDMA_LPF(x, y, len) \
    (((x) != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
/* Fold one raw sample y into the running scaled average x. */
#define	TDMA_SAMPLE(x, y) do {					\
	(x) = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
/* Round a scaled value back to the raw domain (round-half-up). */
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
244#endif /* IEEE80211_SUPPORT_TDMA */
245
SYSCTL_DECL(_hw_ath);

/* Run-time tunables under hw.ath.*; XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

/* Buffer pool sizes; also settable at boot via loader tunables. */
static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

static	int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

/* Convenience unions of the per-band HT HAL mode bits. */
#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
/*
 * Attach the device: allocate the ifnet, attach the HAL, size and
 * reset hardware resources (key cache, rate tables, descriptors,
 * tx queues), probe capabilities, and register with net80211.
 *
 * Returns 0 on success or an errno on failure.  On failure the
 * partially-constructed state is torn down (bad2/bad labels, in
 * reverse order of construction) and sc_invalid is set so the
 * interrupt handler ignores the device.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	/*
	 * NOTE(review): taskqueue_create() is called with M_NOWAIT and its
	 * result is used unchecked below; a NULL return would be dereferenced
	 * by taskqueue_start_threads() — confirm whether this path can fail.
	 */
	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Software LED state; the timer blinks the GPIO-driven LED. */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	/* Wire up the ifnet entry points and send queue. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * The if_ath 11n support is completely not ready for normal use.
	 * Enabling this option will likely break anything and everything.
	 * Don't think of doing that unless you know what you're doing.
	 */

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		int rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
		ic->ic_htcaps = IEEE80211_HTC_HT		/* HT operation */
			    | IEEE80211_HTC_AMPDU		/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU		/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;		/* SM power save off */
			/* NOTE(review): stray empty statement below; harmless
			 * but looks like a leftover from a trailing `|' list. */
			;

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    |  IEEE80211_HTCAP_SHORTGI40;

		/*
		 * rx/tx stream is not currently used anywhere; it needs to be taken
		 * into account when negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &rxs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &txs);

		ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
		ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		device_printf(sc->sc_dev, "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;	/* chain net80211's free */
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
743
/*
 * Detach the device.  Always returns 0; the teardown order below is
 * load-bearing — see the NB comment.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
781
782/*
783 * MAC address handling for multiple BSS on the same radio.
784 * The first vap uses the MAC address from the EEPROM.  For
785 * subsequent vap's we set the U/L bit (bit 1) in the MAC
786 * address and use the next six bits as an index.
787 */
788static void
789assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
790{
791	int i;
792
793	if (clone && sc->sc_hasbmask) {
794		/* NB: we only do this if h/w supports multiple bssid */
795		for (i = 0; i < 8; i++)
796			if ((sc->sc_bssidmask & (1<<i)) == 0)
797				break;
798		if (i != 0)
799			mac[0] |= (i << 2)|0x2;
800	} else
801		i = 0;
802	sc->sc_bssidmask |= 1<<i;
803	sc->sc_hwbssidmask[0] &= ~mac[0];
804	if (i == 0)
805		sc->sc_nbssid0++;
806}
807
/*
 * Release a MAC address slot previously handed out by assign_address()
 * and recompute the hardware bssid mask from the slots still in use.
 * Slot 0 (the EEPROM address) is reference-counted via sc_nbssid0.
 */
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;	/* recover slot index from the address byte */
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);	/* bits that must still match */
		sc->sc_hwbssidmask[0] |= mask;
	}
}
824
825/*
826 * Assign a beacon xmit slot.  We try to space out
827 * assignments so when beacons are staggered the
828 * traffic coming out of the cab q has maximal time
829 * to go out before the next beacon is scheduled.
830 */
831static int
832assign_bslot(struct ath_softc *sc)
833{
834	u_int slot, free;
835
836	free = 0;
837	for (slot = 0; slot < ATH_BCBUF; slot++)
838		if (sc->sc_bslot[slot] == NULL) {
839			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
840			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
841				return slot;
842			free = slot;
843			/* NB: keep looking for a double slot */
844		}
845	return free;
846}
847
848static struct ieee80211vap *
849ath_vap_create(struct ieee80211com *ic,
850	const char name[IFNAMSIZ], int unit, int opmode, int flags,
851	const uint8_t bssid[IEEE80211_ADDR_LEN],
852	const uint8_t mac0[IEEE80211_ADDR_LEN])
853{
854	struct ath_softc *sc = ic->ic_ifp->if_softc;
855	struct ath_vap *avp;
856	struct ieee80211vap *vap;
857	uint8_t mac[IEEE80211_ADDR_LEN];
858	int ic_opmode, needbeacon, error;
859
860	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
861	    M_80211_VAP, M_WAITOK | M_ZERO);
862	needbeacon = 0;
863	IEEE80211_ADDR_COPY(mac, mac0);
864
865	ATH_LOCK(sc);
866	ic_opmode = opmode;		/* default to opmode of new vap */
867	switch (opmode) {
868	case IEEE80211_M_STA:
869		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
870			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
871			goto bad;
872		}
873		if (sc->sc_nvaps) {
874			/*
875			 * With multiple vaps we must fall back
876			 * to s/w beacon miss handling.
877			 */
878			flags |= IEEE80211_CLONE_NOBEACONS;
879		}
880		if (flags & IEEE80211_CLONE_NOBEACONS) {
881			/*
882			 * Station mode w/o beacons are implemented w/ AP mode.
883			 */
884			ic_opmode = IEEE80211_M_HOSTAP;
885		}
886		break;
887	case IEEE80211_M_IBSS:
888		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
889			device_printf(sc->sc_dev,
890			    "only 1 ibss vap supported\n");
891			goto bad;
892		}
893		needbeacon = 1;
894		break;
895	case IEEE80211_M_AHDEMO:
896#ifdef IEEE80211_SUPPORT_TDMA
897		if (flags & IEEE80211_CLONE_TDMA) {
898			if (sc->sc_nvaps != 0) {
899				device_printf(sc->sc_dev,
900				    "only 1 tdma vap supported\n");
901				goto bad;
902			}
903			needbeacon = 1;
904			flags |= IEEE80211_CLONE_NOBEACONS;
905		}
906		/* fall thru... */
907#endif
908	case IEEE80211_M_MONITOR:
909		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
910			/*
911			 * Adopt existing mode.  Adding a monitor or ahdemo
912			 * vap to an existing configuration is of dubious
913			 * value but should be ok.
914			 */
915			/* XXX not right for monitor mode */
916			ic_opmode = ic->ic_opmode;
917		}
918		break;
919	case IEEE80211_M_HOSTAP:
920	case IEEE80211_M_MBSS:
921		needbeacon = 1;
922		break;
923	case IEEE80211_M_WDS:
924		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
925			device_printf(sc->sc_dev,
926			    "wds not supported in sta mode\n");
927			goto bad;
928		}
929		/*
930		 * Silently remove any request for a unique
931		 * bssid; WDS vap's always share the local
932		 * mac address.
933		 */
934		flags &= ~IEEE80211_CLONE_BSSID;
935		if (sc->sc_nvaps == 0)
936			ic_opmode = IEEE80211_M_HOSTAP;
937		else
938			ic_opmode = ic->ic_opmode;
939		break;
940	default:
941		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
942		goto bad;
943	}
944	/*
945	 * Check that a beacon buffer is available; the code below assumes it.
946	 */
947	if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
948		device_printf(sc->sc_dev, "no beacon buffer available\n");
949		goto bad;
950	}
951
952	/* STA, AHDEMO? */
953	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
954		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
955		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
956	}
957
958	vap = &avp->av_vap;
959	/* XXX can't hold mutex across if_alloc */
960	ATH_UNLOCK(sc);
961	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
962	    bssid, mac);
963	ATH_LOCK(sc);
964	if (error != 0) {
965		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
966		    __func__, error);
967		goto bad2;
968	}
969
970	/* h/w crypto support */
971	vap->iv_key_alloc = ath_key_alloc;
972	vap->iv_key_delete = ath_key_delete;
973	vap->iv_key_set = ath_key_set;
974	vap->iv_key_update_begin = ath_key_update_begin;
975	vap->iv_key_update_end = ath_key_update_end;
976
977	/* override various methods */
978	avp->av_recv_mgmt = vap->iv_recv_mgmt;
979	vap->iv_recv_mgmt = ath_recv_mgmt;
980	vap->iv_reset = ath_reset_vap;
981	vap->iv_update_beacon = ath_beacon_update;
982	avp->av_newstate = vap->iv_newstate;
983	vap->iv_newstate = ath_newstate;
984	avp->av_bmiss = vap->iv_bmiss;
985	vap->iv_bmiss = ath_bmiss_vap;
986
987	/* Set default parameters */
988
989	/*
990	 * Anything earlier than some AR9300 series MACs don't
991	 * support a smaller MPDU density.
992	 */
993	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
994	/*
995	 * All NICs can handle the maximum size, however
996	 * AR5416 based MACs can only TX aggregates w/ RTS
997	 * protection when the total aggregate size is <= 8k.
998	 * However, for now that's enforced by the TX path.
999	 */
1000	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1001
1002	avp->av_bslot = -1;
1003	if (needbeacon) {
1004		/*
1005		 * Allocate beacon state and setup the q for buffered
1006		 * multicast frames.  We know a beacon buffer is
1007		 * available because we checked above.
1008		 */
1009		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
1010		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
1011		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1012			/*
1013			 * Assign the vap to a beacon xmit slot.  As above
1014			 * this cannot fail to find a free one.
1015			 */
1016			avp->av_bslot = assign_bslot(sc);
1017			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1018			    ("beacon slot %u not empty", avp->av_bslot));
1019			sc->sc_bslot[avp->av_bslot] = vap;
1020			sc->sc_nbcnvaps++;
1021		}
1022		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1023			/*
1024			 * Multple vaps are to transmit beacons and we
1025			 * have h/w support for TSF adjusting; enable
1026			 * use of staggered beacons.
1027			 */
1028			sc->sc_stagbeacons = 1;
1029		}
1030		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1031	}
1032
1033	ic->ic_opmode = ic_opmode;
1034	if (opmode != IEEE80211_M_WDS) {
1035		sc->sc_nvaps++;
1036		if (opmode == IEEE80211_M_STA)
1037			sc->sc_nstavaps++;
1038		if (opmode == IEEE80211_M_MBSS)
1039			sc->sc_nmeshvaps++;
1040	}
1041	switch (ic_opmode) {
1042	case IEEE80211_M_IBSS:
1043		sc->sc_opmode = HAL_M_IBSS;
1044		break;
1045	case IEEE80211_M_STA:
1046		sc->sc_opmode = HAL_M_STA;
1047		break;
1048	case IEEE80211_M_AHDEMO:
1049#ifdef IEEE80211_SUPPORT_TDMA
1050		if (vap->iv_caps & IEEE80211_C_TDMA) {
1051			sc->sc_tdma = 1;
1052			/* NB: disable tsf adjust */
1053			sc->sc_stagbeacons = 0;
1054		}
1055		/*
1056		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1057		 * just ap mode.
1058		 */
1059		/* fall thru... */
1060#endif
1061	case IEEE80211_M_HOSTAP:
1062	case IEEE80211_M_MBSS:
1063		sc->sc_opmode = HAL_M_HOSTAP;
1064		break;
1065	case IEEE80211_M_MONITOR:
1066		sc->sc_opmode = HAL_M_MONITOR;
1067		break;
1068	default:
1069		/* XXX should not happen */
1070		break;
1071	}
1072	if (sc->sc_hastsfadd) {
1073		/*
1074		 * Configure whether or not TSF adjust should be done.
1075		 */
1076		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1077	}
1078	if (flags & IEEE80211_CLONE_NOBEACONS) {
1079		/*
1080		 * Enable s/w beacon miss handling.
1081		 */
1082		sc->sc_swbmiss = 1;
1083	}
1084	ATH_UNLOCK(sc);
1085
1086	/* complete setup */
1087	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1088	return vap;
1089bad2:
1090	reclaim_address(sc, mac);
1091	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1092bad:
1093	free(avp, M_80211_VAP);
1094	ATH_UNLOCK(sc);
1095	return NULL;
1096}
1097
/*
 * Tear down a vap created by ath_vap_create.
 *
 * The hardware is quiesced while any tx/rx references to the vap are
 * reclaimed, the beacon slot and buffered multicast queue are released,
 * driver vap accounting is updated, and the rx engine and beacons are
 * restarted if the device is still marked running.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	/* NB: detach before reclaiming beacon state the vap may reference */
	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* last beaconing vap gone: turn off staggered beacons */
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* s/w beacon miss only makes sense while a sta vap exists */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		/* return the vap's mac address and refresh the bssid mask */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
1188
1189void
1190ath_suspend(struct ath_softc *sc)
1191{
1192	struct ifnet *ifp = sc->sc_ifp;
1193	struct ieee80211com *ic = ifp->if_l2com;
1194
1195	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1196		__func__, ifp->if_flags);
1197
1198	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1199	if (ic->ic_opmode == IEEE80211_M_STA)
1200		ath_stop(ifp);
1201	else
1202		ieee80211_suspend_all(ic);
1203	/*
1204	 * NB: don't worry about putting the chip in low power
1205	 * mode; pci will power off our socket on suspend and
1206	 * CardBus detaches the device.
1207	 */
1208}
1209
1210/*
1211 * Reset the key cache since some parts do not reset the
1212 * contents on resume.  First we clear all entries, then
1213 * re-load keys that the 802.11 layer assumes are setup
1214 * in h/w.
1215 */
1216static void
1217ath_reset_keycache(struct ath_softc *sc)
1218{
1219	struct ifnet *ifp = sc->sc_ifp;
1220	struct ieee80211com *ic = ifp->if_l2com;
1221	struct ath_hal *ah = sc->sc_ah;
1222	int i;
1223
1224	for (i = 0; i < sc->sc_keymax; i++)
1225		ath_hal_keyreset(ah, i);
1226	ieee80211_crypto_reload_keys(ic);
1227}
1228
/*
 * System resume hook.  The chip was powered down on suspend so it
 * must be fully reset before the key cache is reloaded; afterwards
 * the interface is brought back up if it was up before suspend.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	/* NB(review): reset status is deliberately not checked here */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);
	if (sc->sc_resume_up) {
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			/*
			 * Program the beacon registers using the last rx'd
			 * beacon frame and enable sync on the next beacon
			 * we see.  This should handle the case where we
			 * wakeup and find the same AP and also the case where
			 * we wakeup and need to roam.  For the latter we
			 * should get bmiss events that trigger a roam.
			 */
			ath_beacon_config(sc, NULL);
			sc->sc_syncbeacon = 1;
		} else
			ieee80211_resume_all(ic);
	}
	/* restore software-LED GPIO configuration lost across power-down */
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}
}
1270
1271void
1272ath_shutdown(struct ath_softc *sc)
1273{
1274	struct ifnet *ifp = sc->sc_ifp;
1275
1276	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1277		__func__, ifp->if_flags);
1278
1279	ath_stop(ifp);
1280	/* NB: no point powering down chip as we're about to reboot */
1281}
1282
1283/*
1284 * Interrupt handler.  Most of the actual processing is deferred.
1285 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* NB: shadows the outer 'status'; used only to ack the ISR */
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0)
		return;

	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				/* count down slots until our next beacon */
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/* rx and tx completion processing are deferred to the taskq */
		if (status & HAL_INT_RX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			sc->sc_stats.ast_rxorn++;
		}
	}
}
1411
1412static void
1413ath_fatal_proc(void *arg, int pending)
1414{
1415	struct ath_softc *sc = arg;
1416	struct ifnet *ifp = sc->sc_ifp;
1417	u_int32_t *state;
1418	u_int32_t len;
1419	void *sp;
1420
1421	if_printf(ifp, "hardware error; resetting\n");
1422	/*
1423	 * Fatal errors are unrecoverable.  Typically these
1424	 * are caused by DMA errors.  Collect h/w state from
1425	 * the hal so we can diagnose what's going on.
1426	 */
1427	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1428		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1429		state = sp;
1430		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1431		    state[0], state[1] , state[2], state[3],
1432		    state[4], state[5]);
1433	}
1434	ath_reset(ifp);
1435}
1436
1437static void
1438ath_bmiss_vap(struct ieee80211vap *vap)
1439{
1440	/*
1441	 * Workaround phantom bmiss interrupts by sanity-checking
1442	 * the time of our last rx'd frame.  If it is within the
1443	 * beacon miss interval then ignore the interrupt.  If it's
1444	 * truly a bmiss we'll get another interrupt soon and that'll
1445	 * be dispatched up for processing.  Note this applies only
1446	 * for h/w beacon miss events.
1447	 */
1448	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1449		struct ifnet *ifp = vap->iv_ic->ic_ifp;
1450		struct ath_softc *sc = ifp->if_softc;
1451		u_int64_t lastrx = sc->sc_lastrx;
1452		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1453		u_int bmisstimeout =
1454			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
1455
1456		DPRINTF(sc, ATH_DEBUG_BEACON,
1457		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1458		    __func__, (unsigned long long) tsf,
1459		    (unsigned long long)(tsf - lastrx),
1460		    (unsigned long long) lastrx, bmisstimeout);
1461
1462		if (tsf - lastrx <= bmisstimeout) {
1463			sc->sc_stats.ast_bmiss_phantom++;
1464			return;
1465		}
1466	}
1467	ATH_VAP(vap)->av_bmiss(vap);
1468}
1469
1470static int
1471ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1472{
1473	uint32_t rsize;
1474	void *sp;
1475
1476	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1477		return 0;
1478	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1479	*hangs = *(uint32_t *)sp;
1480	return 1;
1481}
1482
1483static void
1484ath_bmiss_proc(void *arg, int pending)
1485{
1486	struct ath_softc *sc = arg;
1487	struct ifnet *ifp = sc->sc_ifp;
1488	uint32_t hangs;
1489
1490	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1491
1492	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1493		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1494		ath_reset(ifp);
1495	} else
1496		ieee80211_beacon_miss(ifp->if_l2com);
1497}
1498
1499/*
1500 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1501 * calcs together with WME.  If necessary disable the crypto
1502 * hardware and mark the 802.11 state so keys will be setup
1503 * with the MIC work done in software.
1504 */
1505static void
1506ath_settkipmic(struct ath_softc *sc)
1507{
1508	struct ifnet *ifp = sc->sc_ifp;
1509	struct ieee80211com *ic = ifp->if_l2com;
1510
1511	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1512		if (ic->ic_flags & IEEE80211_F_WME) {
1513			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1514			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1515		} else {
1516			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1517			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1518		}
1519	}
1520}
1521
/*
 * Bring the interface up: stop anything in progress, reset the
 * hardware into a known state, restart the receive engine, program
 * the interrupt mask and start the watchdog, then kick all vaps.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* reset the calibration bookkeeping; a full recal is pending */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1615
/*
 * Stop the device (caller holds the softc lock).  Quiesces timers,
 * interrupts, the radio and the tx/rx machinery; steps that touch
 * the hardware are skipped when it has been marked invalid (gone).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1667
1668static void
1669ath_stop(struct ifnet *ifp)
1670{
1671	struct ath_softc *sc = ifp->if_softc;
1672
1673	ATH_LOCK(sc);
1674	ath_stop_locked(ifp);
1675	ATH_UNLOCK(sc);
1676}
1677
1678/*
1679 * Reset the hardware w/o losing operational state.  This is
1680 * basically a more efficient way of doing ath_stop, ath_init,
1681 * followed by state transitions to the current 802.11
1682 * operational state.  Used to recover from various errors and
1683 * to reset or reload hardware state.
1684 */
int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/* quiesce the chip before the full reset */
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	/* NB: diversity state is cleared by reset; re-read the cached copy */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}
	ath_hal_intrset(ah, sc->sc_imask);

	/* kick the tx path in case frames queued while we were down */
	ath_start(ifp);			/* restart xmit */
	return 0;
}
1723
1724static int
1725ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1726{
1727	struct ieee80211com *ic = vap->iv_ic;
1728	struct ifnet *ifp = ic->ic_ifp;
1729	struct ath_softc *sc = ifp->if_softc;
1730	struct ath_hal *ah = sc->sc_ah;
1731
1732	switch (cmd) {
1733	case IEEE80211_IOC_TXPOWER:
1734		/*
1735		 * If per-packet TPC is enabled, then we have nothing
1736		 * to do; otherwise we need to force the global limit.
1737		 * All this can happen directly; no need to reset.
1738		 */
1739		if (!ath_hal_gettpc(ah))
1740			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1741		return 0;
1742	}
1743	return ath_reset(ifp);
1744}
1745
1746struct ath_buf *
1747_ath_getbuf_locked(struct ath_softc *sc)
1748{
1749	struct ath_buf *bf;
1750
1751	ATH_TXBUF_LOCK_ASSERT(sc);
1752
1753	bf = STAILQ_FIRST(&sc->sc_txbuf);
1754	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1755		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1756	else
1757		bf = NULL;
1758	if (bf == NULL) {
1759		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1760		    STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1761			"out of xmit buffers" : "xmit buffer busy");
1762	}
1763	return bf;
1764}
1765
1766struct ath_buf *
1767ath_getbuf(struct ath_softc *sc)
1768{
1769	struct ath_buf *bf;
1770
1771	ATH_TXBUF_LOCK(sc);
1772	bf = _ath_getbuf_locked(sc);
1773	if (bf == NULL) {
1774		struct ifnet *ifp = sc->sc_ifp;
1775
1776		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
1777		sc->sc_stats.ast_tx_qstop++;
1778		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1779	}
1780	ATH_TXBUF_UNLOCK(sc);
1781	return bf;
1782}
1783
/*
 * Transmit entry point: drain the interface send queue, mapping
 * each frame (and all its fragments, all-or-nothing) onto tx
 * buffers and handing them to ath_tx_start.  On any failure the
 * buffer, fragment chain and node reference are reclaimed.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* queue empty: return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		/* NB: node reference was stashed in rcvif by net80211 */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		STAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ifp->if_oerrors++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * the tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* return the buffer + frag chain and drop the node ref */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		/* arm the watchdog now that a frame is in flight */
		sc->sc_wd_timer = 5;
	}
}
1879
1880static int
1881ath_media_change(struct ifnet *ifp)
1882{
1883	int error = ieee80211_media_change(ifp);
1884	/* NB: only the fixed rate can change and that doesn't need a reset */
1885	return (error == ENETRESET ? 0 : error);
1886}
1887
1888/*
1889 * Block/unblock tx+rx processing while a key change is done.
1890 * We assume the caller serializes key management operations
1891 * so we only need to worry about synchronization with other
1892 * uses that originate in the driver.
1893 */
1894static void
1895ath_key_update_begin(struct ieee80211vap *vap)
1896{
1897	struct ifnet *ifp = vap->iv_ic->ic_ifp;
1898	struct ath_softc *sc = ifp->if_softc;
1899
1900	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
1901	taskqueue_block(sc->sc_tq);
1902	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
1903}
1904
1905static void
1906ath_key_update_end(struct ieee80211vap *vap)
1907{
1908	struct ifnet *ifp = vap->iv_ic->ic_ifp;
1909	struct ath_softc *sc = ifp->if_softc;
1910
1911	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
1912	IF_UNLOCK(&ifp->if_snd);
1913	taskqueue_unblock(sc->sc_tq);
1914}
1915
1916/*
1917 * Calculate the receive filter according to the
1918 * operating mode and state:
1919 *
1920 * o always accept unicast, broadcast, and multicast traffic
1921 * o accept PHY error frames when hardware doesn't have MIB support
1922 *   to count and we need them for ANI (sta mode only until recently)
1923 *   and we are not scanning (ANI is disabled)
1924 *   NB: older hal's add rx filter bits out of sight and we need to
1925 *	 blindly preserve them
1926 * o probe request frames are accepted only when operating in
1927 *   hostap, adhoc, mesh, or monitor modes
1928 * o enable promiscuous mode
1929 *   - when in monitor mode
1930 *   - if interface marked PROMISC (assumes bridge setting is filtered)
1931 * o accept beacons:
1932 *   - when operating in station mode for collecting rssi data when
1933 *     the station is otherwise quiet, or
1934 *   - when operating in adhoc mode so the 802.11 layer creates
1935 *     node table entries for peers,
1936 *   - when scanning
1937 *   - when doing s/w beacon miss (e.g. for ap+sta)
1938 *   - when operating in ap mode in 11g to detect overlapping bss that
1939 *     require protection
1940 *   - when operating in mesh mode to detect neighbors
1941 * o accept control frames:
1942 *   - when in monitor mode
1943 * XXX HT protection for 11n
1944 */
1945static u_int32_t
1946ath_calcrxfilter(struct ath_softc *sc)
1947{
1948	struct ifnet *ifp = sc->sc_ifp;
1949	struct ieee80211com *ic = ifp->if_l2com;
1950	u_int32_t rfilt;
1951
1952	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
1953	if (!sc->sc_needmib && !sc->sc_scanning)
1954		rfilt |= HAL_RX_FILTER_PHYERR;
1955	if (ic->ic_opmode != IEEE80211_M_STA)
1956		rfilt |= HAL_RX_FILTER_PROBEREQ;
1957	/* XXX ic->ic_monvaps != 0? */
1958	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
1959		rfilt |= HAL_RX_FILTER_PROM;
1960	if (ic->ic_opmode == IEEE80211_M_STA ||
1961	    ic->ic_opmode == IEEE80211_M_IBSS ||
1962	    sc->sc_swbmiss || sc->sc_scanning)
1963		rfilt |= HAL_RX_FILTER_BEACON;
1964	/*
1965	 * NB: We don't recalculate the rx filter when
1966	 * ic_protmode changes; otherwise we could do
1967	 * this only when ic_protmode != NONE.
1968	 */
1969	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
1970	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
1971		rfilt |= HAL_RX_FILTER_BEACON;
1972
1973	/*
1974	 * Enable hardware PS-POLL RX only for hostap mode;
1975	 * STA mode sends PS-POLL frames but never
1976	 * receives them.
1977	 */
1978	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
1979	    0, NULL) == HAL_OK &&
1980	    ic->ic_opmode == IEEE80211_M_HOSTAP)
1981		rfilt |= HAL_RX_FILTER_PSPOLL;
1982
1983	if (sc->sc_nmeshvaps) {
1984		rfilt |= HAL_RX_FILTER_BEACON;
1985		if (sc->sc_hasbmatch)
1986			rfilt |= HAL_RX_FILTER_BSSID;
1987		else
1988			rfilt |= HAL_RX_FILTER_PROM;
1989	}
1990	if (ic->ic_opmode == IEEE80211_M_MONITOR)
1991		rfilt |= HAL_RX_FILTER_CONTROL;
1992
1993	/*
1994	 * Enable RX of compressed BAR frames only when doing
1995	 * 802.11n. Required for A-MPDU.
1996	 */
1997	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
1998		rfilt |= HAL_RX_FILTER_COMPBAR;
1999
2000	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2001	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2002	return rfilt;
2003}
2004
2005static void
2006ath_update_promisc(struct ifnet *ifp)
2007{
2008	struct ath_softc *sc = ifp->if_softc;
2009	u_int32_t rfilt;
2010
2011	/* configure rx filter */
2012	rfilt = ath_calcrxfilter(sc);
2013	ath_hal_setrxfilter(sc->sc_ah, rfilt);
2014
2015	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2016}
2017
2018static void
2019ath_update_mcast(struct ifnet *ifp)
2020{
2021	struct ath_softc *sc = ifp->if_softc;
2022	u_int32_t mfilt[2];
2023
2024	/* calculate and install multicast filter */
2025	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2026		struct ifmultiaddr *ifma;
2027		/*
2028		 * Merge multicast addresses to form the hardware filter.
2029		 */
2030		mfilt[0] = mfilt[1] = 0;
2031		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
2032		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2033			caddr_t dl;
2034			u_int32_t val;
2035			u_int8_t pos;
2036
2037			/* calculate XOR of eight 6bit values */
2038			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2039			val = LE_READ_4(dl + 0);
2040			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2041			val = LE_READ_4(dl + 3);
2042			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2043			pos &= 0x3f;
2044			mfilt[pos / 32] |= (1 << (pos % 32));
2045		}
2046		if_maddr_runlock(ifp);
2047	} else
2048		mfilt[0] = mfilt[1] = ~0;
2049	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2050	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2051		__func__, mfilt[0], mfilt[1]);
2052}
2053
2054static void
2055ath_mode_init(struct ath_softc *sc)
2056{
2057	struct ifnet *ifp = sc->sc_ifp;
2058	struct ath_hal *ah = sc->sc_ah;
2059	u_int32_t rfilt;
2060
2061	/* configure rx filter */
2062	rfilt = ath_calcrxfilter(sc);
2063	ath_hal_setrxfilter(ah, rfilt);
2064
2065	/* configure operational mode */
2066	ath_hal_setopmode(ah);
2067
2068	/* handle any link-level address change */
2069	ath_hal_setmac(ah, IF_LLADDR(ifp));
2070
2071	/* calculate and install multicast filter */
2072	ath_update_mcast(ifp);
2073}
2074
2075/*
2076 * Set the slot time based on the current setting.
2077 */
2078static void
2079ath_setslottime(struct ath_softc *sc)
2080{
2081	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2082	struct ath_hal *ah = sc->sc_ah;
2083	u_int usec;
2084
2085	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2086		usec = 13;
2087	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2088		usec = 21;
2089	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2090		/* honor short/long slot time only in 11g */
2091		/* XXX shouldn't honor on pure g or turbo g channel */
2092		if (ic->ic_flags & IEEE80211_F_SHSLOT)
2093			usec = HAL_SLOT_TIME_9;
2094		else
2095			usec = HAL_SLOT_TIME_20;
2096	} else
2097		usec = HAL_SLOT_TIME_9;
2098
2099	DPRINTF(sc, ATH_DEBUG_RESET,
2100	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2101	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2102	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2103
2104	ath_hal_setslottime(ah, usec);
2105	sc->sc_updateslot = OK;
2106}
2107
2108/*
2109 * Callback from the 802.11 layer to update the
2110 * slot time based on the current setting.
2111 */
2112static void
2113ath_updateslot(struct ifnet *ifp)
2114{
2115	struct ath_softc *sc = ifp->if_softc;
2116	struct ieee80211com *ic = ifp->if_l2com;
2117
2118	/*
2119	 * When not coordinating the BSS, change the hardware
2120	 * immediately.  For other operation we defer the change
2121	 * until beacon updates have propagated to the stations.
2122	 */
2123	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2124	    ic->ic_opmode == IEEE80211_M_MBSS)
2125		sc->sc_updateslot = UPDATE;
2126	else
2127		ath_setslottime(sc);
2128}
2129
2130/*
2131 * Setup a h/w transmit queue for beacons.
2132 */
2133static int
2134ath_beaconq_setup(struct ath_hal *ah)
2135{
2136	HAL_TXQ_INFO qi;
2137
2138	memset(&qi, 0, sizeof(qi));
2139	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2140	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2141	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2142	/* NB: for dynamic turbo, don't enable any other interrupts */
2143	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2144	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2145}
2146
2147/*
2148 * Setup the transmit queue parameters for the beacon queue.
2149 */
2150static int
2151ath_beaconq_config(struct ath_softc *sc)
2152{
2153#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
2154	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2155	struct ath_hal *ah = sc->sc_ah;
2156	HAL_TXQ_INFO qi;
2157
2158	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2159	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2160	    ic->ic_opmode == IEEE80211_M_MBSS) {
2161		/*
2162		 * Always burst out beacon and CAB traffic.
2163		 */
2164		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2165		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2166		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2167	} else {
2168		struct wmeParams *wmep =
2169			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2170		/*
2171		 * Adhoc mode; important thing is to use 2x cwmin.
2172		 */
2173		qi.tqi_aifs = wmep->wmep_aifsn;
2174		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2175		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2176	}
2177
2178	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2179		device_printf(sc->sc_dev, "unable to update parameters for "
2180			"beacon hardware queue!\n");
2181		return 0;
2182	} else {
2183		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2184		return 1;
2185	}
2186#undef ATH_EXPONENT_TO_VALUE
2187}
2188
2189/*
2190 * Allocate and setup an initial beacon frame.
2191 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	/* Release any frame/node previously attached to the beacon buffer. */
	bf = avp->av_bcbuf;
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	/* Map the frame for DMA; the mbuf is freed here on failure. */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timestamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* Timestamp field sits immediately after the 802.11 header. */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	/* Hang the frame and a node reference on the beacon buffer. */
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);

	return 0;
}
2272
2273/*
2274 * Setup the beacon frame for transmit.
2275 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* Beacons are never acked. */
	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}
2355
2356static void
2357ath_beacon_update(struct ieee80211vap *vap, int item)
2358{
2359	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2360
2361	setbit(bo->bo_flags, item);
2362}
2363
2364/*
2365 * Append the contents of src to dst; both queues
2366 * are assumed to be locked.
2367 */
2368static void
2369ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2370{
2371	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2372	dst->axq_link = src->axq_link;
2373	src->axq_link = NULL;
2374	dst->axq_depth += src->axq_depth;
2375	src->axq_depth = 0;
2376}
2377
2378/*
2379 * Transmit a beacon frame at SWBA.  Dynamic updates to the
2380 * frame contents are done as needed and the slot time is
2381 * also adjusted based on current state.
2382 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		sc->sc_stats.ast_be_missed++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {			/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		/* Map the current TSF (usec -> TU) to a beacon slot. */
		tsftu = ath_hal_gettsf32(ah) >> 10;
		/* XXX lintval */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {					/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/* Chain one beacon per active vap via the tx descriptors. */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;				/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}
2494
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state >= IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	/* NB: av_mcastq depth is sampled here without its lock held. */
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/* bo_tim[4] & 1 is the DTIM "mcast pending" indication. */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	/* Rebuild the tx descriptor and flush the frame to memory. */
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * ensure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
	}
	return bf;
}
2583
2584static void
2585ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
2586{
2587	struct ath_vap *avp = ATH_VAP(vap);
2588	struct ath_hal *ah = sc->sc_ah;
2589	struct ath_buf *bf;
2590	struct mbuf *m;
2591	int error;
2592
2593	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
2594
2595	/*
2596	 * Update dynamic beacon contents.  If this returns
2597	 * non-zero then we need to remap the memory because
2598	 * the beacon frame changed size (probably because
2599	 * of the TIM bitmap).
2600	 */
2601	bf = avp->av_bcbuf;
2602	m = bf->bf_m;
2603	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
2604		/* XXX too conservative? */
2605		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2606		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2607					     bf->bf_segs, &bf->bf_nseg,
2608					     BUS_DMA_NOWAIT);
2609		if (error != 0) {
2610			if_printf(vap->iv_ifp,
2611			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
2612			    __func__, error);
2613			return;
2614		}
2615	}
2616	ath_beacon_setup(sc, bf);
2617	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
2618
2619	/* NB: caller is known to have already stopped tx dma */
2620	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
2621	ath_hal_txstart(ah, sc->sc_bhalq);
2622}
2623
2624/*
2625 * Reset the hardware after detecting beacons have stopped.
2626 */
2627static void
2628ath_bstuck_proc(void *arg, int pending)
2629{
2630	struct ath_softc *sc = arg;
2631	struct ifnet *ifp = sc->sc_ifp;
2632
2633	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2634		sc->sc_bmisscount);
2635	sc->sc_stats.ast_bstuck++;
2636	ath_reset(ifp);
2637}
2638
2639/*
2640 * Reclaim beacon resources and return buffer to the pool.
2641 */
2642static void
2643ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
2644{
2645
2646	if (bf->bf_m != NULL) {
2647		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2648		m_freem(bf->bf_m);
2649		bf->bf_m = NULL;
2650	}
2651	if (bf->bf_node != NULL) {
2652		ieee80211_free_node(bf->bf_node);
2653		bf->bf_node = NULL;
2654	}
2655	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
2656}
2657
2658/*
2659 * Reclaim beacon resources.
2660 */
2661static void
2662ath_beacon_free(struct ath_softc *sc)
2663{
2664	struct ath_buf *bf;
2665
2666	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
2667		if (bf->bf_m != NULL) {
2668			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2669			m_freem(bf->bf_m);
2670			bf->bf_m = NULL;
2671		}
2672		if (bf->bf_node != NULL) {
2673			ieee80211_free_node(bf->bf_node);
2674			bf->bf_node = NULL;
2675		}
2676	}
2677}
2678
2679/*
2680 * Configure the beacon and sleep timers.
2681 *
2682 * When operating as an AP this resets the TSF and sets
2683 * up the hardware to notify us when we need to issue beacons.
2684 *
2685 * When operating in station mode this sets up the beacon
2686 * timers according to the timestamp of the last received
2687 * beacon and the current TSF, configures PCF and DTIM
2688 * handling, programs the sleep registers so the hardware
2689 * will wakeup in time to receive beacons, and configures
2690 * the beacon miss handling so we'll receive a BMISS
2691 * interrupt when we stop seeing beacons from the AP
2692 * we've associated with.
2693 */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = vap->iv_bss;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS) {
		/*
		 * For multi-bss ap/mesh support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		/* Station with h/w beacon miss: full beacon/sleep state. */
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We ensure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes sense
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* Disable interrupts while reprogramming the timers. */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_MBSS) {
			/*
			 * In AP/mesh mode we enable the beacon timers
			 * and SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	/* Timers are programmed; no need to resync off a received beacon. */
	sc->sc_syncbeacon = 0;
#undef FUDGE
#undef TSF_TO_TU
}
2881
2882static void
2883ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2884{
2885	bus_addr_t *paddr = (bus_addr_t*) arg;
2886	KASSERT(error == 0, ("error %u on bus_dma callback", error));
2887	*paddr = segs->ds_addr;
2888}
2889
2890static int
2891ath_descdma_setup(struct ath_softc *sc,
2892	struct ath_descdma *dd, ath_bufhead *head,
2893	const char *name, int nbuf, int ndesc)
2894{
2895#define	DS2PHYS(_dd, _ds) \
2896	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2897	struct ifnet *ifp = sc->sc_ifp;
2898	struct ath_desc *ds;
2899	struct ath_buf *bf;
2900	int i, bsize, error;
2901
2902	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
2903	    __func__, name, nbuf, ndesc);
2904
2905	dd->dd_name = name;
2906	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2907
2908	/*
2909	 * Setup DMA descriptor area.
2910	 */
2911	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2912		       PAGE_SIZE, 0,		/* alignment, bounds */
2913		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2914		       BUS_SPACE_MAXADDR,	/* highaddr */
2915		       NULL, NULL,		/* filter, filterarg */
2916		       dd->dd_desc_len,		/* maxsize */
2917		       1,			/* nsegments */
2918		       dd->dd_desc_len,		/* maxsegsize */
2919		       BUS_DMA_ALLOCNOW,	/* flags */
2920		       NULL,			/* lockfunc */
2921		       NULL,			/* lockarg */
2922		       &dd->dd_dmat);
2923	if (error != 0) {
2924		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2925		return error;
2926	}
2927
2928	/* allocate descriptors */
2929	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2930	if (error != 0) {
2931		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2932			"error %u\n", dd->dd_name, error);
2933		goto fail0;
2934	}
2935
2936	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2937				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2938				 &dd->dd_dmamap);
2939	if (error != 0) {
2940		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2941			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2942		goto fail1;
2943	}
2944
2945	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2946				dd->dd_desc, dd->dd_desc_len,
2947				ath_load_cb, &dd->dd_desc_paddr,
2948				BUS_DMA_NOWAIT);
2949	if (error != 0) {
2950		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2951			dd->dd_name, error);
2952		goto fail2;
2953	}
2954
2955	ds = dd->dd_desc;
2956	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2957	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2958	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2959
2960	/* allocate rx buffers */
2961	bsize = sizeof(struct ath_buf) * nbuf;
2962	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
2963	if (bf == NULL) {
2964		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
2965			dd->dd_name, bsize);
2966		goto fail3;
2967	}
2968	dd->dd_bufptr = bf;
2969
2970	STAILQ_INIT(head);
2971	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
2972		bf->bf_desc = ds;
2973		bf->bf_daddr = DS2PHYS(dd, ds);
2974		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2975				&bf->bf_dmamap);
2976		if (error != 0) {
2977			if_printf(ifp, "unable to create dmamap for %s "
2978				"buffer %u, error %u\n", dd->dd_name, i, error);
2979			ath_descdma_cleanup(sc, dd, head);
2980			return error;
2981		}
2982		STAILQ_INSERT_TAIL(head, bf, bf_list);
2983	}
2984	return 0;
2985fail3:
2986	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
2987fail2:
2988	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2989fail1:
2990	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2991fail0:
2992	bus_dma_tag_destroy(dd->dd_dmat);
2993	memset(dd, 0, sizeof(*dd));
2994	return error;
2995#undef DS2PHYS
2996}
2997
/*
 * Tear down a descriptor DMA area created by ath_descdma_setup():
 * release the descriptor memory/tag, free any attached mbufs,
 * destroy per-buffer dmamaps, drop node references, then reclaim
 * the ath_buf array and zero *dd.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	/* Release the descriptor block itself (allocated on dd_dmat). */
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	STAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			/* NB: buffer maps were created on sc_dmat, not dd_dmat */
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}
3033
3034static int
3035ath_desc_alloc(struct ath_softc *sc)
3036{
3037	int error;
3038
3039	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3040			"rx", ath_rxbuf, 1);
3041	if (error != 0)
3042		return error;
3043
3044	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3045			"tx", ath_txbuf, ATH_TXDESC);
3046	if (error != 0) {
3047		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3048		return error;
3049	}
3050
3051	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3052			"beacon", ATH_BCBUF, 1);
3053	if (error != 0) {
3054		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3055		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3056		return error;
3057	}
3058	return 0;
3059}
3060
3061static void
3062ath_desc_free(struct ath_softc *sc)
3063{
3064
3065	if (sc->sc_bdma.dd_desc_len != 0)
3066		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3067	if (sc->sc_txdma.dd_desc_len != 0)
3068		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3069	if (sc->sc_rxdma.dd_desc_len != 0)
3070		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3071}
3072
3073static struct ieee80211_node *
3074ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3075{
3076	struct ieee80211com *ic = vap->iv_ic;
3077	struct ath_softc *sc = ic->ic_ifp->if_softc;
3078	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3079	struct ath_node *an;
3080
3081	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3082	if (an == NULL) {
3083		/* XXX stat+msg */
3084		return NULL;
3085	}
3086	ath_rate_node_init(sc, an);
3087
3088	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3089	return &an->an_node;
3090}
3091
3092static void
3093ath_node_free(struct ieee80211_node *ni)
3094{
3095	struct ieee80211com *ic = ni->ni_ic;
3096        struct ath_softc *sc = ic->ic_ifp->if_softc;
3097
3098	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3099
3100	ath_rate_node_cleanup(sc, ATH_NODE(ni));
3101	sc->sc_node_free(ni);
3102}
3103
3104static void
3105ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3106{
3107	struct ieee80211com *ic = ni->ni_ic;
3108	struct ath_softc *sc = ic->ic_ifp->if_softc;
3109	struct ath_hal *ah = sc->sc_ah;
3110
3111	*rssi = ic->ic_node_getrssi(ni);
3112	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3113		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
3114	else
3115		*noise = -95;		/* nominally correct */
3116}
3117
/*
 * (Re)arm an rx buffer: attach a fresh mbuf cluster if the buffer
 * has none, DMA-map and sync it, then set up its descriptor and
 * link it onto the hardware rx chain via sc_rxlink.  Returns 0 or
 * an errno (ENOMEM on mbuf exhaustion, bus_dma error on map failure).
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* Expose the whole cluster to the hardware. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo). This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Patch the previous tail descriptor to point at this one. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3199
3200/*
3201 * Extend 15-bit time stamp from rx descriptor to
3202 * a full 64-bit TSF using the specified TSF.
3203 */
static __inline u_int64_t
ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
{
	u_int64_t tsf_hi = tsf & ~(u_int64_t)0x7fff;

	/*
	 * If the low 15 bits of the TSF have already wrapped past
	 * the rx timestamp, the frame belongs to the previous
	 * 15-bit epoch; back the upper bits off by one epoch.
	 */
	if ((tsf & 0x7fff) < rstamp)
		tsf_hi -= 0x8000;
	return (tsf_hi | rstamp);
}
3211
3212/*
3213 * Intercept management frames to collect beacon rssi data
3214 * and to do ibss merges.
3215 */
/*
 * Driver hook wrapped around the vap's recv_mgmt method: feed beacon
 * rssi into the hal statistics, resync beacon timers off a received
 * beacon when sc_syncbeacon is set, and handle ibss merges based on
 * the rx timestamp of beacon/probe-response frames.
 */
static void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			/* NB: sc_lastrs was recorded by the rx path */
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
3268
3269/*
3270 * Set the default antenna.
3271 */
3272static void
3273ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3274{
3275	struct ath_hal *ah = sc->sc_ah;
3276
3277	/* XXX block beacon interrupts */
3278	ath_hal_setdefantenna(ah, antenna);
3279	if (sc->sc_defant != antenna)
3280		sc->sc_stats.ast_ant_defswitch++;
3281	sc->sc_defant = antenna;
3282	sc->sc_rxotherant = 0;
3283}
3284
/*
 * Fill in the radiotap rx header (sc_rx_th) for a received frame
 * before it is handed to any active radiotap taps: rate/flags from
 * the hw rate map, HT channel flags (AR5416+), extended tsf, FCS
 * status, noise/signal and antenna.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		/* Derive the HT20/HT40 flag from the rx status + channel. */
		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/* NOTE(review): flags SHORTGI when HAL_RX_GI is NOT set —
		 * looks inverted relative to the flag name; confirm against
		 * the hal's HAL_RX_GI definition. */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
3329
3330static void
3331ath_handle_micerror(struct ieee80211com *ic,
3332	struct ieee80211_frame *wh, int keyix)
3333{
3334	struct ieee80211_node *ni;
3335
3336	/* XXX recheck MIC to deal w/ chips that lie */
3337	/* XXX discard MIC errors on !data frames */
3338	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3339	if (ni != NULL) {
3340		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3341		ieee80211_free_node(ni);
3342	}
3343}
3344
/*
 * Deferred receive processing (taskqueue context; npending is the
 * taskqueue's pending count).  Walk the rx buffer list draining
 * completed descriptors: account errors, reassemble jumbograms,
 * dispatch frames to net80211, manage antenna diversity and the
 * software LED, then re-arm each buffer via ath_rxbuf_init().
 */
static void
ath_rx_proc(void *arg, int npending)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ngood = 0;
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);

		/* These aren't specifically errors */
		if (rs->rs_flags & HAL_RX_GI)
			sc->sc_stats.ast_rx_halfgi++;
		if (rs->rs_flags & HAL_RX_2040)
			sc->sc_stats.ast_rx_2040++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
			sc->sc_stats.ast_rx_pre_crc_err++;
		if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
			sc->sc_stats.ast_rx_post_crc_err++;
		if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
			sc->sc_stats.ast_rx_decrypt_busy_err++;
		if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
			sc->sc_stats.ast_rx_hi_rx_chain++;

		if (rs->rs_status != 0) {
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
				if (rs->rs_phyerr < 64)
					sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (ieee80211_radiotap_active(ic) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				ath_rx_tap(ifp, m, rs, tsf, nf);
				ieee80211_radiotap_rx_all(ic, m);
			}
			/* XXX pass MIC errors up for s/w recalculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		/*
		 * Populate the rx status block.  When there are bpf
		 * listeners we do the additional work to provide
		 * complete status.  Otherwise we fill in only the
		 * material required by ieee80211_input.  Note that
		 * noise setting is filled in above.
		 */
		if (ieee80211_radiotap_active(ic))
			ath_rx_tap(ifp, m, rs, tsf, nf);

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			if (!ieee80211_radiotap_active(ic)) {
				DPRINTF(sc, ATH_DEBUG_RECV,
				    "%s: short packet %d\n", __func__, len);
				sc->sc_stats.ast_rx_tooshort++;
			} else {
				/* NB: in particular this captures ack's */
				ieee80211_radiotap_rx_all(ic, m);
			}
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		/* Strip the hardware-appended FCS before handing up. */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		/* NB: consumed by ath_recv_mgmt() for ibss merge timestamps */
		sc->sc_lastrs = rs;

		if (rs->rs_isaggr)
			sc->sc_stats.ast_rx_agg++;

		if (ni != NULL) {
			/*
			 * Only punt packets for ampdu reorder processing for
			 * 11n nodes; net80211 enforces that M_AMPDU is only
			 * set for 11n nodes.
			 */
			if (ni->ni_flags & IEEE80211_NODE_HT)
				m->m_flags |= M_AMPDU;

			/*
			 * Sending station is known, dispatch directly.
			 */
			type = ieee80211_input(ni, m, rs->rs_rssi, nf);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}

		/* Newer school diversity - kite specific for now */
		/* XXX perhaps migrate the normal diversity code to this? */
		if ((ah)->ah_rxAntCombDiversity)
			(*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);

		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	/* Restart transmit now that rx processing may have freed buffers. */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_start(ifp);
	}
#undef PA2DESC
}
3694
3695static void
3696ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3697{
3698	txq->axq_qnum = qnum;
3699	txq->axq_ac = 0;
3700	txq->axq_depth = 0;
3701	txq->axq_intrcnt = 0;
3702	txq->axq_link = NULL;
3703	STAILQ_INIT(&txq->axq_q);
3704	ATH_TXQ_LOCK_INIT(sc, txq);
3705}
3706
3707/*
3708 * Setup a h/w transmit queue.
3709 */
/*
 * Ask the hal for a h/w queue of the given type/subtype and bind
 * our software state (sc_txq[qnum]) to it.  Returns NULL when the
 * part has no free queue or the hal hands back an out-of-range
 * queue number.
 */
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	/* NB: HAL_TXQ_USEDEFAULT lets the hal choose its own values */
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= N(sc->sc_txq)) {
		device_printf(sc->sc_dev,
			"hal qnum %u out of range, max %zu!\n",
			qnum, N(sc->sc_txq));
		ath_hal_releasetxqueue(ah, qnum);
		return NULL;
	}
	/* The hal may return a queue that is already set up; reuse it. */
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
#undef N
}
3758
3759/*
3760 * Setup a hardware data transmit queue for the specified
3761 * access control.  The hal may not support all requested
3762 * queues in which case it will return a reference to a
3763 * previously setup queue.  We record the mapping from ac's
3764 * to h/w queues for use by ath_tx_start and also track
3765 * the set of h/w queues being used to optimize work in the
3766 * transmit interrupt handler and related routines.
3767 */
3768static int
3769ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3770{
3771#define	N(a)	(sizeof(a)/sizeof(a[0]))
3772	struct ath_txq *txq;
3773
3774	if (ac >= N(sc->sc_ac2q)) {
3775		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3776			ac, N(sc->sc_ac2q));
3777		return 0;
3778	}
3779	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3780	if (txq != NULL) {
3781		txq->axq_ac = ac;
3782		sc->sc_ac2q[ac] = txq;
3783		return 1;
3784	} else
3785		return 0;
3786#undef N
3787}
3788
3789/*
3790 * Update WME parameters for a transmit queue.
3791 */
/*
 * Push updated parameters for one access category's h/w queue:
 * WME parameters from net80211, or (when TDMA support is compiled
 * in and active) slot-based settings.  Returns 1 on success, 0 if
 * the hal rejects the new properties.
 */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Start from the queue's current properties and overwrite below. */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma) {
		/*
		 * AIFS is zero so there's no pre-transmit wait.  The
		 * burst time defines the slot duration and is configured
		 * through net80211.  The QCU is setup to not do post-xmit
		 * back off, lockout all lower-priority QCU's, and fire
		 * off the DMA beacon alert timer which is setup based
		 * on the slot configuration.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      | HAL_TXQ_DBA_GATED
			      | HAL_TXQ_BACKOFF_DISABLE
			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			      ;
		qi.tqi_aifs = 0;
		/* XXX +dbaprep? */
		qi.tqi_readyTime = sc->sc_tdmaslotlen;
		qi.tqi_burstTime = qi.tqi_readyTime;
	} else {
#endif
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXDESCINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      ;
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		qi.tqi_readyTime = 0;
		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
	}
#endif

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
	    __func__, txq->axq_qnum, qi.tqi_qflags,
	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);

	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
3860
3861/*
3862 * Callback from the 802.11 layer to update WME parameters.
3863 */
3864static int
3865ath_wme_update(struct ieee80211com *ic)
3866{
3867	struct ath_softc *sc = ic->ic_ifp->if_softc;
3868
3869	return !ath_txq_update(sc, WME_AC_BE) ||
3870	    !ath_txq_update(sc, WME_AC_BK) ||
3871	    !ath_txq_update(sc, WME_AC_VI) ||
3872	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3873}
3874
3875/*
3876 * Reclaim resources for a setup queue.
3877 */
3878static void
3879ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3880{
3881
3882	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3883	ATH_TXQ_LOCK_DESTROY(txq);
3884	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3885}
3886
3887/*
3888 * Reclaim all tx queue resources.
3889 */
3890static void
3891ath_tx_cleanup(struct ath_softc *sc)
3892{
3893	int i;
3894
3895	ATH_TXBUF_LOCK_DESTROY(sc);
3896	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3897		if (ATH_TXQ_SETUP(sc, i))
3898			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3899}
3900
3901/*
3902 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3903 * using the current rates in sc_rixmap.
3904 */
3905int
3906ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3907{
3908	int rix = sc->sc_rixmap[rate];
3909	/* NB: return lowest rix for invalid rate */
3910	return (rix == 0xff ? 0 : rix);
3911}
3912
3913/*
3914 * Process completed xmit descriptors from the specified queue.
3915 */
3916static int
3917ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
3918{
3919	struct ath_hal *ah = sc->sc_ah;
3920	struct ifnet *ifp = sc->sc_ifp;
3921	struct ieee80211com *ic = ifp->if_l2com;
3922	struct ath_buf *bf, *last;
3923	struct ath_desc *ds, *ds0;
3924	struct ath_tx_status *ts;
3925	struct ieee80211_node *ni;
3926	struct ath_node *an;
3927	int sr, lr, pri, nacked;
3928	HAL_STATUS status;
3929
3930	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
3931		__func__, txq->axq_qnum,
3932		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3933		txq->axq_link);
3934	nacked = 0;
3935	for (;;) {
3936		ATH_TXQ_LOCK(txq);
3937		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
3938		bf = STAILQ_FIRST(&txq->axq_q);
3939		if (bf == NULL) {
3940			ATH_TXQ_UNLOCK(txq);
3941			break;
3942		}
3943		ds0 = &bf->bf_desc[0];
3944		ds = &bf->bf_desc[bf->bf_nseg - 1];
3945		ts = &bf->bf_status.ds_txstat;
3946		status = ath_hal_txprocdesc(ah, ds, ts);
3947#ifdef ATH_DEBUG
3948		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
3949			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3950			    status == HAL_OK);
3951#endif
3952		if (status == HAL_EINPROGRESS) {
3953			ATH_TXQ_UNLOCK(txq);
3954			break;
3955		}
3956		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
3957#ifdef IEEE80211_SUPPORT_TDMA
3958		if (txq->axq_depth > 0) {
3959			/*
3960			 * More frames follow.  Mark the buffer busy
3961			 * so it's not re-used while the hardware may
3962			 * still re-read the link field in the descriptor.
3963			 */
3964			bf->bf_flags |= ATH_BUF_BUSY;
3965		} else
3966#else
3967		if (txq->axq_depth == 0)
3968#endif
3969			txq->axq_link = NULL;
3970		ATH_TXQ_UNLOCK(txq);
3971
3972		ni = bf->bf_node;
3973		if (ni != NULL) {
3974			an = ATH_NODE(ni);
3975			if (ts->ts_status == 0) {
3976				u_int8_t txant = ts->ts_antenna;
3977				sc->sc_stats.ast_ant_tx[txant]++;
3978				sc->sc_ant_tx[txant]++;
3979				if (ts->ts_finaltsi != 0)
3980					sc->sc_stats.ast_tx_altrate++;
3981				pri = M_WME_GETAC(bf->bf_m);
3982				if (pri >= WME_AC_VO)
3983					ic->ic_wme.wme_hipri_traffic++;
3984				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
3985					ni->ni_inact = ni->ni_inact_reload;
3986			} else {
3987				if (ts->ts_status & HAL_TXERR_XRETRY)
3988					sc->sc_stats.ast_tx_xretries++;
3989				if (ts->ts_status & HAL_TXERR_FIFO)
3990					sc->sc_stats.ast_tx_fifoerr++;
3991				if (ts->ts_status & HAL_TXERR_FILT)
3992					sc->sc_stats.ast_tx_filtered++;
3993				if (ts->ts_status & HAL_TXERR_XTXOP)
3994					sc->sc_stats.ast_tx_xtxop++;
3995				if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3996					sc->sc_stats.ast_tx_timerexpired++;
3997
3998				/* XXX HAL_TX_DATA_UNDERRUN */
3999				/* XXX HAL_TX_DELIM_UNDERRUN */
4000
4001				if (bf->bf_m->m_flags & M_FF)
4002					sc->sc_stats.ast_ff_txerr++;
4003			}
4004			/* XXX when is this valid? */
4005			if (ts->ts_status & HAL_TX_DESC_CFG_ERR)
4006				sc->sc_stats.ast_tx_desccfgerr++;
4007
4008			sr = ts->ts_shortretry;
4009			lr = ts->ts_longretry;
4010			sc->sc_stats.ast_tx_shortretry += sr;
4011			sc->sc_stats.ast_tx_longretry += lr;
4012			/*
4013			 * Hand the descriptor to the rate control algorithm.
4014			 */
4015			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4016			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
4017				/*
4018				 * If frame was ack'd update statistics,
4019				 * including the last rx time used to
4020				 * workaround phantom bmiss interrupts.
4021				 */
4022				if (ts->ts_status == 0) {
4023					nacked++;
4024					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4025					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4026						ts->ts_rssi);
4027				}
4028				ath_rate_tx_complete(sc, an, bf);
4029			}
4030			/*
4031			 * Do any tx complete callback.  Note this must
4032			 * be done before releasing the node reference.
4033			 */
4034			if (bf->bf_m->m_flags & M_TXCB)
4035				ieee80211_process_callback(ni, bf->bf_m,
4036				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
4037				        ts->ts_status : HAL_TXERR_XRETRY);
4038			ieee80211_free_node(ni);
4039		}
4040		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4041		    BUS_DMASYNC_POSTWRITE);
4042		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4043
4044		m_freem(bf->bf_m);
4045		bf->bf_m = NULL;
4046		bf->bf_node = NULL;
4047
4048		ATH_TXBUF_LOCK(sc);
4049		last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
4050		if (last != NULL)
4051			last->bf_flags &= ~ATH_BUF_BUSY;
4052		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4053		ATH_TXBUF_UNLOCK(sc);
4054	}
4055#ifdef IEEE80211_SUPPORT_SUPERG
4056	/*
4057	 * Flush fast-frame staging queue when traffic slows.
4058	 */
4059	if (txq->axq_depth <= 1)
4060		ieee80211_ff_flush(ic, txq->axq_ac);
4061#endif
4062	return nacked;
4063}
4064
4065static __inline int
4066txqactive(struct ath_hal *ah, int qnum)
4067{
4068	u_int32_t txqs = 1<<qnum;
4069	ath_hal_gettxintrtxqs(ah, &txqs);
4070	return (txqs & (1<<qnum));
4071}
4072
4073/*
4074 * Deferred processing of transmit interrupt; special-cased
4075 * for a single hardware transmit queue (e.g. 5210 and 5211).
4076 */
4077static void
4078ath_tx_proc_q0(void *arg, int npending)
4079{
4080	struct ath_softc *sc = arg;
4081	struct ifnet *ifp = sc->sc_ifp;
4082
4083	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
4084		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4085	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4086		ath_tx_processq(sc, sc->sc_cabq);
4087	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4088	sc->sc_wd_timer = 0;
4089
4090	if (sc->sc_softled)
4091		ath_led_event(sc, sc->sc_txrix);
4092
4093	ath_start(ifp);
4094}
4095
4096/*
4097 * Deferred processing of transmit interrupt; special-cased
4098 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4099 */
4100static void
4101ath_tx_proc_q0123(void *arg, int npending)
4102{
4103	struct ath_softc *sc = arg;
4104	struct ifnet *ifp = sc->sc_ifp;
4105	int nacked;
4106
4107	/*
4108	 * Process each active queue.
4109	 */
4110	nacked = 0;
4111	if (txqactive(sc->sc_ah, 0))
4112		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
4113	if (txqactive(sc->sc_ah, 1))
4114		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
4115	if (txqactive(sc->sc_ah, 2))
4116		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
4117	if (txqactive(sc->sc_ah, 3))
4118		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
4119	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4120		ath_tx_processq(sc, sc->sc_cabq);
4121	if (nacked)
4122		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4123
4124	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4125	sc->sc_wd_timer = 0;
4126
4127	if (sc->sc_softled)
4128		ath_led_event(sc, sc->sc_txrix);
4129
4130	ath_start(ifp);
4131}
4132
4133/*
4134 * Deferred processing of transmit interrupt.
4135 */
4136static void
4137ath_tx_proc(void *arg, int npending)
4138{
4139	struct ath_softc *sc = arg;
4140	struct ifnet *ifp = sc->sc_ifp;
4141	int i, nacked;
4142
4143	/*
4144	 * Process each active queue.
4145	 */
4146	nacked = 0;
4147	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4148		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
4149			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
4150	if (nacked)
4151		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4152
4153	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4154	sc->sc_wd_timer = 0;
4155
4156	if (sc->sc_softled)
4157		ath_led_event(sc, sc->sc_txrix);
4158
4159	ath_start(ifp);
4160}
4161
/*
 * Reclaim all buffers queued on a tx queue without touching the
 * hardware: tx-complete callbacks are run with a -1 status, node
 * references are released and the buffers returned to the free
 * list.  Completion status from the h/w is ignored (only dumped
 * under ATH_DEBUG_RESET).
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_proc
	 */
	/* clear the busy flag on the free-list tail; DMA is stopped */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
			    bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Do any callback and reclaim the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m, -1);
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_flags &= ~ATH_BUF_BUSY;

		/* return the buffer to the free list */
		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}
4222
/*
 * Halt DMA on a single h/w transmit queue; queued frames are not
 * reclaimed here (see ath_tx_draintxq).  Queue state is logged
 * under ATH_DEBUG_RESET first.
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
	    __func__, txq->axq_qnum,
	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
	    txq->axq_link);
	/* NB: stop status deliberately ignored; caller drains regardless */
	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
}
4234
4235/*
4236 * Drain the transmit queues and reclaim resources.
4237 */
4238static void
4239ath_draintxq(struct ath_softc *sc)
4240{
4241	struct ath_hal *ah = sc->sc_ah;
4242	struct ifnet *ifp = sc->sc_ifp;
4243	int i;
4244
4245	/* XXX return value */
4246	if (!sc->sc_invalid) {
4247		/* don't touch the hardware if marked invalid */
4248		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4249		    __func__, sc->sc_bhalq,
4250		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4251		    NULL);
4252		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4253		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4254			if (ATH_TXQ_SETUP(sc, i))
4255				ath_tx_stopdma(sc, &sc->sc_txq[i]);
4256	}
4257	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4258		if (ATH_TXQ_SETUP(sc, i))
4259			ath_tx_draintxq(sc, &sc->sc_txq[i]);
4260#ifdef ATH_DEBUG
4261	if (sc->sc_debug & ATH_DEBUG_RESET) {
4262		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
4263		if (bf != NULL && bf->bf_m != NULL) {
4264			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4265				ath_hal_txprocdesc(ah, bf->bf_desc,
4266				    &bf->bf_status.ds_txstat) == HAL_OK);
4267			ieee80211_dump_pkt(ifp->if_l2com,
4268			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4269			    0, -1);
4270		}
4271	}
4272#endif /* ATH_DEBUG */
4273	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4274	sc->sc_wd_timer = 0;
4275}
4276
4277/*
4278 * Disable the receive h/w in preparation for a reset.
4279 */
4280static void
4281ath_stoprecv(struct ath_softc *sc)
4282{
4283#define	PA2DESC(_sc, _pa) \
4284	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
4285		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
4286	struct ath_hal *ah = sc->sc_ah;
4287
4288	ath_hal_stoppcurecv(ah);	/* disable PCU */
4289	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
4290	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
4291	DELAY(3000);			/* 3ms is long enough for 1 frame */
4292#ifdef ATH_DEBUG
4293	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
4294		struct ath_buf *bf;
4295		u_int ix;
4296
4297		printf("%s: rx queue %p, link %p\n", __func__,
4298			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
4299		ix = 0;
4300		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4301			struct ath_desc *ds = bf->bf_desc;
4302			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
4303			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
4304				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4305			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
4306				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
4307			ix++;
4308		}
4309	}
4310#endif
4311	if (sc->sc_rxpending != NULL) {
4312		m_freem(sc->sc_rxpending);
4313		sc->sc_rxpending = NULL;
4314	}
4315	sc->sc_rxlink = NULL;		/* just in case */
4316#undef PA2DESC
4317}
4318
4319/*
4320 * Enable the receive h/w following a reset.
4321 */
4322static int
4323ath_startrecv(struct ath_softc *sc)
4324{
4325	struct ath_hal *ah = sc->sc_ah;
4326	struct ath_buf *bf;
4327
4328	sc->sc_rxlink = NULL;
4329	sc->sc_rxpending = NULL;
4330	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4331		int error = ath_rxbuf_init(sc, bf);
4332		if (error != 0) {
4333			DPRINTF(sc, ATH_DEBUG_RECV,
4334				"%s: ath_rxbuf_init failed %d\n",
4335				__func__, error);
4336			return error;
4337		}
4338	}
4339
4340	bf = STAILQ_FIRST(&sc->sc_rxbuf);
4341	ath_hal_putrxbuf(ah, bf->bf_daddr);
4342	ath_hal_rxena(ah);		/* enable recv descriptors */
4343	ath_mode_init(sc);		/* set filters, etc. */
4344	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
4345	return 0;
4346}
4347
4348/*
4349 * Update internal state after a channel change.
4350 */
4351static void
4352ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4353{
4354	enum ieee80211_phymode mode;
4355
4356	/*
4357	 * Change channels and update the h/w rate map
4358	 * if we're switching; e.g. 11a to 11b/g.
4359	 */
4360	mode = ieee80211_chan2mode(chan);
4361	if (mode != sc->sc_curmode)
4362		ath_setcurmode(sc, mode);
4363	sc->sc_curchan = chan;
4364}
4365
4366/*
4367 * Set/change channels.  If the channel is really being changed,
4368 * it's done by resetting the chip.  To accomplish this we must
4369 * first cleanup any pending DMA, then restart stuff after a la
4370 * ath_init.
4371 */
4372static int
4373ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4374{
4375	struct ifnet *ifp = sc->sc_ifp;
4376	struct ieee80211com *ic = ifp->if_l2com;
4377	struct ath_hal *ah = sc->sc_ah;
4378
4379	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
4380	    __func__, ieee80211_chan2ieee(ic, chan),
4381	    chan->ic_freq, chan->ic_flags);
4382	if (chan != sc->sc_curchan) {
4383		HAL_STATUS status;
4384		/*
4385		 * To switch channels clear any pending DMA operations;
4386		 * wait long enough for the RX fifo to drain, reset the
4387		 * hardware at the new frequency, and then re-enable
4388		 * the relevant bits of the h/w.
4389		 */
4390		ath_hal_intrset(ah, 0);		/* disable interrupts */
4391		ath_draintxq(sc);		/* clear pending tx frames */
4392		ath_stoprecv(sc);		/* turn off frame recv */
4393		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
4394			if_printf(ifp, "%s: unable to reset "
4395			    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
4396			    __func__, ieee80211_chan2ieee(ic, chan),
4397			    chan->ic_freq, chan->ic_flags, status);
4398			return EIO;
4399		}
4400		sc->sc_diversity = ath_hal_getdiversity(ah);
4401
4402		/*
4403		 * Re-enable rx framework.
4404		 */
4405		if (ath_startrecv(sc) != 0) {
4406			if_printf(ifp, "%s: unable to restart recv logic\n",
4407			    __func__);
4408			return EIO;
4409		}
4410
4411		/*
4412		 * Change channels and update the h/w rate map
4413		 * if we're switching; e.g. 11a to 11b/g.
4414		 */
4415		ath_chan_change(sc, chan);
4416
4417		/*
4418		 * Re-enable interrupts.
4419		 */
4420		ath_hal_intrset(ah, sc->sc_imask);
4421	}
4422	return 0;
4423}
4424
4425/*
4426 * Periodically recalibrate the PHY to account
4427 * for temperature/environment changes.
4428 */
4429static void
4430ath_calibrate(void *arg)
4431{
4432	struct ath_softc *sc = arg;
4433	struct ath_hal *ah = sc->sc_ah;
4434	struct ifnet *ifp = sc->sc_ifp;
4435	struct ieee80211com *ic = ifp->if_l2com;
4436	HAL_BOOL longCal, isCalDone;
4437	HAL_BOOL aniCal, shortCal = AH_FALSE;
4438	int nextcal;
4439
4440	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
4441		goto restart;
4442	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
4443	aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
4444	if (sc->sc_doresetcal)
4445		shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
4446
4447	DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
4448	if (aniCal) {
4449		sc->sc_stats.ast_ani_cal++;
4450		sc->sc_lastani = ticks;
4451		ath_hal_ani_poll(ah, sc->sc_curchan);
4452	}
4453
4454	if (longCal) {
4455		sc->sc_stats.ast_per_cal++;
4456		sc->sc_lastlongcal = ticks;
4457		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4458			/*
4459			 * Rfgain is out of bounds, reset the chip
4460			 * to load new gain values.
4461			 */
4462			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4463				"%s: rfgain change\n", __func__);
4464			sc->sc_stats.ast_per_rfgain++;
4465			ath_reset(ifp);
4466		}
4467		/*
4468		 * If this long cal is after an idle period, then
4469		 * reset the data collection state so we start fresh.
4470		 */
4471		if (sc->sc_resetcal) {
4472			(void) ath_hal_calreset(ah, sc->sc_curchan);
4473			sc->sc_lastcalreset = ticks;
4474			sc->sc_lastshortcal = ticks;
4475			sc->sc_resetcal = 0;
4476			sc->sc_doresetcal = AH_TRUE;
4477		}
4478	}
4479
4480	/* Only call if we're doing a short/long cal, not for ANI calibration */
4481	if (shortCal || longCal) {
4482		if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
4483			if (longCal) {
4484				/*
4485				 * Calibrate noise floor data again in case of change.
4486				 */
4487				ath_hal_process_noisefloor(ah);
4488			}
4489		} else {
4490			DPRINTF(sc, ATH_DEBUG_ANY,
4491				"%s: calibration of channel %u failed\n",
4492				__func__, sc->sc_curchan->ic_freq);
4493			sc->sc_stats.ast_per_calfail++;
4494		}
4495		if (shortCal)
4496			sc->sc_lastshortcal = ticks;
4497	}
4498	if (!isCalDone) {
4499restart:
4500		/*
4501		 * Use a shorter interval to potentially collect multiple
4502		 * data samples required to complete calibration.  Once
4503		 * we're told the work is done we drop back to a longer
4504		 * interval between requests.  We're more aggressive doing
4505		 * work when operating as an AP to improve operation right
4506		 * after startup.
4507		 */
4508		sc->sc_lastshortcal = ticks;
4509		nextcal = ath_shortcalinterval*hz/1000;
4510		if (sc->sc_opmode != HAL_M_HOSTAP)
4511			nextcal *= 10;
4512		sc->sc_doresetcal = AH_TRUE;
4513	} else {
4514		/* nextcal should be the shortest time for next event */
4515		nextcal = ath_longcalinterval*hz;
4516		if (sc->sc_lastcalreset == 0)
4517			sc->sc_lastcalreset = sc->sc_lastlongcal;
4518		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
4519			sc->sc_resetcal = 1;	/* setup reset next trip */
4520		sc->sc_doresetcal = AH_FALSE;
4521	}
4522	/* ANI calibration may occur more often than short/long/resetcal */
4523	if (ath_anicalinterval > 0)
4524		nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
4525
4526	if (nextcal != 0) {
4527		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
4528		    __func__, nextcal, isCalDone ? "" : "!");
4529		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
4530	} else {
4531		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
4532		    __func__);
4533		/* NB: don't rearm timer */
4534	}
4535}
4536
4537static void
4538ath_scan_start(struct ieee80211com *ic)
4539{
4540	struct ifnet *ifp = ic->ic_ifp;
4541	struct ath_softc *sc = ifp->if_softc;
4542	struct ath_hal *ah = sc->sc_ah;
4543	u_int32_t rfilt;
4544
4545	/* XXX calibration timer? */
4546
4547	sc->sc_scanning = 1;
4548	sc->sc_syncbeacon = 0;
4549	rfilt = ath_calcrxfilter(sc);
4550	ath_hal_setrxfilter(ah, rfilt);
4551	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4552
4553	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4554		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4555}
4556
4557static void
4558ath_scan_end(struct ieee80211com *ic)
4559{
4560	struct ifnet *ifp = ic->ic_ifp;
4561	struct ath_softc *sc = ifp->if_softc;
4562	struct ath_hal *ah = sc->sc_ah;
4563	u_int32_t rfilt;
4564
4565	sc->sc_scanning = 0;
4566	rfilt = ath_calcrxfilter(sc);
4567	ath_hal_setrxfilter(ah, rfilt);
4568	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4569
4570	ath_hal_process_noisefloor(ah);
4571
4572	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4573		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4574		 sc->sc_curaid);
4575}
4576
4577static void
4578ath_set_channel(struct ieee80211com *ic)
4579{
4580	struct ifnet *ifp = ic->ic_ifp;
4581	struct ath_softc *sc = ifp->if_softc;
4582
4583	(void) ath_chan_set(sc, ic->ic_curchan);
4584	/*
4585	 * If we are returning to our bss channel then mark state
4586	 * so the next recv'd beacon's tsf will be used to sync the
4587	 * beacon timers.  Note that since we only hear beacons in
4588	 * sta/ibss mode this has no effect in other operating modes.
4589	 */
4590	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4591		sc->sc_syncbeacon = 1;
4592}
4593
4594/*
4595 * Walk the vap list and check if there any vap's in RUN state.
4596 */
4597static int
4598ath_isanyrunningvaps(struct ieee80211vap *this)
4599{
4600	struct ieee80211com *ic = this->iv_ic;
4601	struct ieee80211vap *vap;
4602
4603	IEEE80211_LOCK_ASSERT(ic);
4604
4605	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
4606		if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
4607			return 1;
4608	}
4609	return 0;
4610}
4611
/*
 * net80211 state transition handler: bring driver/hal state
 * (LED, rx filter, bssid/aid, beacons, interrupts, timers) in
 * line with the vap's new 802.11 state, and invoke the saved
 * parent method for the net80211 side.  Returns 0 or the error
 * from the parent method / beacon allocation.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	/* per-state LED setting, indexed by ieee80211_state */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* NB: cal timer is restarted below if we end up in RUN */
	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = vap->iv_bss;
	rfilt = ath_calcrxfilter(sc);
	/* in these modes the peer's bssid/aid are tracked in h/w */
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
		case IEEE80211_M_MBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	return error;
}
4804
4805/*
4806 * Allocate a key cache slot to the station so we can
4807 * setup a mapping from key index to node. The key cache
4808 * slot is needed for managing antenna state and for
4809 * compression when stations do not use crypto.  We do
4810 * it uniliaterally here; if crypto is employed this slot
4811 * will be reassigned.
4812 */
4813static void
4814ath_setup_stationkey(struct ieee80211_node *ni)
4815{
4816	struct ieee80211vap *vap = ni->ni_vap;
4817	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4818	ieee80211_keyix keyix, rxkeyix;
4819
4820	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
4821		/*
4822		 * Key cache is full; we'll fall back to doing
4823		 * the more expensive lookup in software.  Note
4824		 * this also means no h/w compression.
4825		 */
4826		/* XXX msg+statistic */
4827	} else {
4828		/* XXX locking? */
4829		ni->ni_ucastkey.wk_keyix = keyix;
4830		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
4831		/* NB: must mark device key to get called back on delete */
4832		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
4833		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
4834		/* NB: this will create a pass-thru key entry */
4835		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
4836	}
4837}
4838
4839/*
4840 * Setup driver-specific state for a newly associated node.
4841 * Note that we're called also on a re-associate, the isnew
4842 * param tells us if this is the first time or not.
4843 */
4844static void
4845ath_newassoc(struct ieee80211_node *ni, int isnew)
4846{
4847	struct ath_node *an = ATH_NODE(ni);
4848	struct ieee80211vap *vap = ni->ni_vap;
4849	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4850	const struct ieee80211_txparam *tp = ni->ni_txparms;
4851
4852	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
4853	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
4854
4855	ath_rate_newassoc(sc, an, isnew);
4856	if (isnew &&
4857	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
4858	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
4859		ath_setup_stationkey(ni);
4860}
4861
4862static int
4863ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
4864	int nchans, struct ieee80211_channel chans[])
4865{
4866	struct ath_softc *sc = ic->ic_ifp->if_softc;
4867	struct ath_hal *ah = sc->sc_ah;
4868	HAL_STATUS status;
4869
4870	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
4871	    "%s: rd %u cc %u location %c%s\n",
4872	    __func__, reg->regdomain, reg->country, reg->location,
4873	    reg->ecm ? " ecm" : "");
4874
4875	status = ath_hal_set_channels(ah, chans, nchans,
4876	    reg->country, reg->regdomain);
4877	if (status != HAL_OK) {
4878		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
4879		    __func__, status);
4880		return EINVAL;		/* XXX */
4881	}
4882	return 0;
4883}
4884
4885static void
4886ath_getradiocaps(struct ieee80211com *ic,
4887	int maxchans, int *nchans, struct ieee80211_channel chans[])
4888{
4889	struct ath_softc *sc = ic->ic_ifp->if_softc;
4890	struct ath_hal *ah = sc->sc_ah;
4891
4892	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
4893	    __func__, SKU_DEBUG, CTRY_DEFAULT);
4894
4895	/* XXX check return */
4896	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
4897	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
4898
4899}
4900
/*
 * Build the initial channel list from the EEPROM and record the
 * EEPROM regulatory domain / country code in both the softc and
 * the net80211 regdomain state.  Returns 0 on success or EINVAL
 * if the hal cannot produce a channel list.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	/* cache the raw EEPROM regdomain/country for later reference */
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}
4938
4939static void
4940ath_led_done(void *arg)
4941{
4942	struct ath_softc *sc = arg;
4943
4944	sc->sc_blinking = 0;
4945}
4946
4947/*
4948 * Turn the LED off: flip the pin and then set a timer so no
4949 * update will happen for the specified duration.
4950 */
4951static void
4952ath_led_off(void *arg)
4953{
4954	struct ath_softc *sc = arg;
4955
4956	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
4957	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
4958}
4959
4960/*
4961 * Blink the LED according to the specified on/off times.
4962 */
4963static void
4964ath_led_blink(struct ath_softc *sc, int on, int off)
4965{
4966	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
4967	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
4968	sc->sc_blinking = 1;
4969	sc->sc_ledoff = off;
4970	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
4971}
4972
4973static void
4974ath_led_event(struct ath_softc *sc, int rix)
4975{
4976	sc->sc_ledevent = ticks;	/* time of last event */
4977	if (sc->sc_blinking)		/* don't interrupt active blink */
4978		return;
4979	ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
4980}
4981
4982static int
4983ath_rate_setup(struct ath_softc *sc, u_int mode)
4984{
4985	struct ath_hal *ah = sc->sc_ah;
4986	const HAL_RATE_TABLE *rt;
4987
4988	switch (mode) {
4989	case IEEE80211_MODE_11A:
4990		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
4991		break;
4992	case IEEE80211_MODE_HALF:
4993		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
4994		break;
4995	case IEEE80211_MODE_QUARTER:
4996		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
4997		break;
4998	case IEEE80211_MODE_11B:
4999		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5000		break;
5001	case IEEE80211_MODE_11G:
5002		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5003		break;
5004	case IEEE80211_MODE_TURBO_A:
5005		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5006		break;
5007	case IEEE80211_MODE_TURBO_G:
5008		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5009		break;
5010	case IEEE80211_MODE_STURBO_A:
5011		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5012		break;
5013	case IEEE80211_MODE_11NA:
5014		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5015		break;
5016	case IEEE80211_MODE_11NG:
5017		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5018		break;
5019	default:
5020		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5021			__func__, mode);
5022		return 0;
5023	}
5024	sc->sc_rates[mode] = rt;
5025	return (rt != NULL);
5026}
5027
/*
 * Switch the driver's current phy mode: rebuild the 802.11 rate ->
 * hardware rate index (rix) map, the per-rix radiotap/LED blink table
 * and the protection-frame rate index.  The caller must have already
 * populated sc_rates[mode] via ath_rate_setup() and is responsible
 * for resetting rate control state afterwards.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* Invalidate the whole rate -> rix map before rebuilding it. */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			/* HT rates are keyed with the MCS bit set. */
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			/* Unused slots get the slowest blink timing. */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		/* Find the LED blink timing for this 802.11 rate. */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}
5104
5105static void
5106ath_watchdog(void *arg)
5107{
5108	struct ath_softc *sc = arg;
5109
5110	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5111		struct ifnet *ifp = sc->sc_ifp;
5112		uint32_t hangs;
5113
5114		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
5115		    hangs != 0) {
5116			if_printf(ifp, "%s hang detected (0x%x)\n",
5117			    hangs & 0xff ? "bb" : "mac", hangs);
5118		} else
5119			if_printf(ifp, "device timeout\n");
5120		ath_reset(ifp);
5121		ifp->if_oerrors++;
5122		sc->sc_stats.ast_watchdog++;
5123	}
5124	callout_schedule(&sc->sc_wd_ch, hz);
5125}
5126
#ifdef ATH_DIAGAPI
/*
 * Diagnostic interface to the HAL.  This is used by various
 * tools to do things like retrieve register contents for
 * debugging.  The mechanism is intentionally opaque so that
 * it can change frequently w/o concern for compatibility.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	/*
	 * NOTE(review): in/out sizes come straight from the userland
	 * request and are not bounded before being passed to malloc();
	 * consider clamping them — verify against the diag tools' usage.
	 */
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = malloc(insize, M_TEMP, M_NOWAIT);
		if (indata == NULL) {
			error = ENOMEM;
			goto bad;
		}
		error = copyin(ad->ad_in_data, indata, insize);
		if (error)
			goto bad;
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results).  Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
		if (outdata == NULL) {
			error = ENOMEM;
			goto bad;
		}
	}
	/* NB: the HAL may shrink outsize and/or repoint outdata. */
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata != NULL)
			error = copyout(outdata, ad->ad_out_data,
					ad->ad_out_size);
	} else {
		error = EINVAL;
	}
bad:
	/* goto-cleanup: release whichever buffers were allocated above. */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		free(indata, M_TEMP);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		free(outdata, M_TEMP);
	return error;
}
#endif /* ATH_DIAGAPI */
5189
/*
 * Network interface ioctl handler: interface flag changes, media
 * queries, driver statistics retrieval/reset and (optionally) the
 * HAL diagnostic API.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		/* NOTE(review): snapshot is taken without ATH_LOCK held. */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		/* NOTE(review): assumes sc_currates was set at attach/mode
		 * change — confirm this ioctl can't race a mode switch. */
		rt = sc->sc_currates;
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
			sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		/* Zeroing stats is privileged. */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
5274
5275/*
5276 * Announce various information on device/driver attach.
5277 */
5278static void
5279ath_announce(struct ath_softc *sc)
5280{
5281	struct ifnet *ifp = sc->sc_ifp;
5282	struct ath_hal *ah = sc->sc_ah;
5283
5284	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
5285		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
5286		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
5287	if (bootverbose) {
5288		int i;
5289		for (i = 0; i <= WME_AC_VO; i++) {
5290			struct ath_txq *txq = sc->sc_ac2q[i];
5291			if_printf(ifp, "Use hw queue %u for %s traffic\n",
5292				txq->axq_qnum, ieee80211_wme_acnames[i]);
5293		}
5294		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5295			sc->sc_cabq->axq_qnum);
5296		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5297	}
5298	if (ath_rxbuf != ATH_RXBUF)
5299		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5300	if (ath_txbuf != ATH_TXBUF)
5301		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5302	if (sc->sc_mcastkey && bootverbose)
5303		if_printf(ifp, "using multicast key search\n");
5304}
5305
#ifdef IEEE80211_SUPPORT_TDMA
/*
 * Read the hardware's idea of the next TBTT directly from the
 * TIMER0 register.
 * NOTE(review): the register offset is hard-coded here instead of
 * coming from the HAL; presumably valid for the MACs TDMA runs on —
 * verify against the HAL register definitions.
 */
static __inline uint32_t
ath_hal_getnexttbtt(struct ath_hal *ah)
{
#define	AR_TIMER0	0x8028
	return OS_REG_READ(ah, AR_TIMER0);
}
5313
/*
 * Slew the TSF forward by adding tsfdelta to the low 32 bits of
 * the TSF register.
 */
static __inline void
ath_hal_adjusttsf(struct ath_hal *ah, int32_t tsfdelta)
{
	/* XXX handle wrap/overflow */
	OS_REG_WRITE(ah, AR_TSF_L32, OS_REG_READ(ah, AR_TSF_L32) + tsfdelta);
}
5320
/*
 * Program the beacon timers for TDMA operation: next TBTT, the
 * DMA and software beacon alert points (offset back from TBTT by
 * the configured prep times; note the <<3 scaling of nexttbtt —
 * presumably the dba/swba timers run at 1/8 TU granularity, verify
 * against the HAL) and the ATIM window.
 */
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_BEACON_TIMERS bt;

	bt.bt_intval = bintval | HAL_BEACON_ENA;
	bt.bt_nexttbtt = nexttbtt;
	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
	bt.bt_nextatim = nexttbtt+1;
	ath_hal_beaconsettimers(ah, &bt);
}
5334
5335/*
5336 * Calculate the beacon interval.  This is periodic in the
5337 * superframe for the bss.  We assume each station is configured
5338 * identically wrt transmit rate so the guard time we calculate
5339 * above will be the same on all stations.  Note we need to
5340 * factor in the xmit time because the hardware will schedule
5341 * a frame for transmit if the start of the frame is within
5342 * the burst time.  When we get hardware that properly kills
5343 * frames in the PCU we can reduce/eliminate the guard time.
5344 *
5345 * Roundup to 1024 is so we have 1 TU buffer in the guard time
5346 * to deal with the granularity of the nexttbtt timer.  11n MAC's
5347 * with 1us timer granularity should allow us to reduce/eliminate
5348 * this.
5349 */
5350static void
5351ath_tdma_bintvalsetup(struct ath_softc *sc,
5352	const struct ieee80211_tdma_state *tdma)
5353{
5354	/* copy from vap state (XXX check all vaps have same value?) */
5355	sc->sc_tdmaslotlen = tdma->tdma_slotlen;
5356
5357	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
5358		tdma->tdma_slotcnt, 1024);
5359	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
5360	if (sc->sc_tdmabintval & 1)
5361		sc->sc_tdmabintval++;
5362
5363	if (tdma->tdma_slot == 0) {
5364		/*
5365		 * Only slot 0 beacons; other slots respond.
5366		 */
5367		sc->sc_imask |= HAL_INT_SWBA;
5368		sc->sc_tdmaswba = 0;		/* beacon immediately */
5369	} else {
5370		/* XXX all vaps must be slot 0 or slot !0 */
5371		sc->sc_imask &= ~HAL_INT_SWBA;
5372	}
5373}
5374
5375/*
5376 * Max 802.11 overhead.  This assumes no 4-address frames and
5377 * the encapsulation done by ieee80211_encap (llc).  We also
5378 * include potential crypto overhead.
5379 */
5380#define	IEEE80211_MAXOVERHEAD \
5381	(sizeof(struct ieee80211_qosframe) \
5382	 + sizeof(struct llc) \
5383	 + IEEE80211_ADDR_LEN \
5384	 + IEEE80211_WEP_IVLEN \
5385	 + IEEE80211_WEP_KIDLEN \
5386	 + IEEE80211_WEP_CRCLEN \
5387	 + IEEE80211_WEP_MICLEN \
5388	 + IEEE80211_CRC_LEN)
5389
5390/*
5391 * Setup initially for tdma operation.  Start the beacon
5392 * timers and enable SWBA if we are slot 0.  Otherwise
5393 * we wait for slot 0 to arrive so we can sync up before
5394 * starting to transmit.
5395 */
5396static void
5397ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
5398{
5399	struct ath_hal *ah = sc->sc_ah;
5400	struct ifnet *ifp = sc->sc_ifp;
5401	struct ieee80211com *ic = ifp->if_l2com;
5402	const struct ieee80211_txparam *tp;
5403	const struct ieee80211_tdma_state *tdma = NULL;
5404	int rix;
5405
5406	if (vap == NULL) {
5407		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
5408		if (vap == NULL) {
5409			if_printf(ifp, "%s: no vaps?\n", __func__);
5410			return;
5411		}
5412	}
5413	tp = vap->iv_bss->ni_txparms;
5414	/*
5415	 * Calculate the guard time for each slot.  This is the
5416	 * time to send a maximal-size frame according to the
5417	 * fixed/lowest transmit rate.  Note that the interface
5418	 * mtu does not include the 802.11 overhead so we must
5419	 * tack that on (ath_hal_computetxtime includes the
5420	 * preamble and plcp in it's calculation).
5421	 */
5422	tdma = vap->iv_tdma;
5423	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
5424		rix = ath_tx_findrix(sc, tp->ucastrate);
5425	else
5426		rix = ath_tx_findrix(sc, tp->mcastrate);
5427	/* XXX short preamble assumed */
5428	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
5429		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
5430
5431	ath_hal_intrset(ah, 0);
5432
5433	ath_beaconq_config(sc);			/* setup h/w beacon q */
5434	if (sc->sc_setcca)
5435		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
5436	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
5437	ath_tdma_settimers(sc, sc->sc_tdmabintval,
5438		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
5439	sc->sc_syncbeacon = 0;
5440
5441	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
5442	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
5443
5444	ath_hal_intrset(ah, sc->sc_imask);
5445
5446	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
5447	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
5448	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
5449	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
5450	    sc->sc_tdmadbaprep);
5451}
5452
5453/*
5454 * Update tdma operation.  Called from the 802.11 layer
5455 * when a beacon is received from the TDMA station operating
5456 * in the slot immediately preceding us in the bss.  Use
5457 * the rx timestamp for the beacon frame to update our
5458 * beacon timers so we follow their schedule.  Note that
5459 * by using the rx timestamp we implicitly include the
5460 * propagation delay in our schedule.
5461 */
5462static void
5463ath_tdma_update(struct ieee80211_node *ni,
5464	const struct ieee80211_tdma_param *tdma, int changed)
5465{
5466#define	TSF_TO_TU(_h,_l) \
5467	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
5468#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
5469	struct ieee80211vap *vap = ni->ni_vap;
5470	struct ieee80211com *ic = ni->ni_ic;
5471	struct ath_softc *sc = ic->ic_ifp->if_softc;
5472	struct ath_hal *ah = sc->sc_ah;
5473	const HAL_RATE_TABLE *rt = sc->sc_currates;
5474	u_int64_t tsf, rstamp, nextslot;
5475	u_int32_t txtime, nextslottu, timer0;
5476	int32_t tudelta, tsfdelta;
5477	const struct ath_rx_status *rs;
5478	int rix;
5479
5480	sc->sc_stats.ast_tdma_update++;
5481
5482	/*
5483	 * Check for and adopt configuration changes.
5484	 */
5485	if (changed != 0) {
5486		const struct ieee80211_tdma_state *ts = vap->iv_tdma;
5487
5488		ath_tdma_bintvalsetup(sc, ts);
5489		if (changed & TDMA_UPDATE_SLOTLEN)
5490			ath_wme_update(ic);
5491
5492		DPRINTF(sc, ATH_DEBUG_TDMA,
5493		    "%s: adopt slot %u slotcnt %u slotlen %u us "
5494		    "bintval %u TU\n", __func__,
5495		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
5496		    sc->sc_tdmabintval);
5497
5498		/* XXX right? */
5499		ath_hal_intrset(ah, sc->sc_imask);
5500		/* NB: beacon timers programmed below */
5501	}
5502
5503	/* extend rx timestamp to 64 bits */
5504	rs = sc->sc_lastrs;
5505	tsf = ath_hal_gettsf64(ah);
5506	rstamp = ath_extend_tsf(rs->rs_tstamp, tsf);
5507	/*
5508	 * The rx timestamp is set by the hardware on completing
5509	 * reception (at the point where the rx descriptor is DMA'd
5510	 * to the host).  To find the start of our next slot we
5511	 * must adjust this time by the time required to send
5512	 * the packet just received.
5513	 */
5514	rix = rt->rateCodeToIndex[rs->rs_rate];
5515	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
5516	    rt->info[rix].shortPreamble);
5517	/* NB: << 9 is to cvt to TU and /2 */
5518	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
5519	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;
5520
5521	/*
5522	 * TIMER0 is the h/w's idea of NextTBTT (in TU's).  Convert
5523	 * to usecs and calculate the difference between what the
5524	 * other station thinks and what we have programmed.  This
5525	 * lets us figure how to adjust our timers to match.  The
5526	 * adjustments are done by pulling the TSF forward and possibly
5527	 * rewriting the beacon timers.
5528	 */
5529	timer0 = ath_hal_getnexttbtt(ah);
5530	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD+1)) - TU_TO_TSF(timer0));
5531
5532	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
5533	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
5534	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
5535
5536	if (tsfdelta < 0) {
5537		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
5538		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
5539		tsfdelta = -tsfdelta % 1024;
5540		nextslottu++;
5541	} else if (tsfdelta > 0) {
5542		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
5543		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
5544		tsfdelta = 1024 - (tsfdelta % 1024);
5545		nextslottu++;
5546	} else {
5547		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
5548		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
5549	}
5550	tudelta = nextslottu - timer0;
5551
5552	/*
5553	 * Copy sender's timetstamp into tdma ie so they can
5554	 * calculate roundtrip time.  We submit a beacon frame
5555	 * below after any timer adjustment.  The frame goes out
5556	 * at the next TBTT so the sender can calculate the
5557	 * roundtrip by inspecting the tdma ie in our beacon frame.
5558	 *
5559	 * NB: This tstamp is subtlely preserved when
5560	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
5561	 *     slot position changes) because ieee80211_add_tdma
5562	 *     skips over the data.
5563	 */
5564	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
5565		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
5566		&ni->ni_tstamp.data, 8);
5567#if 0
5568	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
5569	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u timer0 %u (%d)\n",
5570	    (unsigned long long) tsf, (unsigned long long) nextslot,
5571	    (int)(nextslot - tsf), tsfdelta,
5572	    nextslottu, timer0, tudelta);
5573#endif
5574	/*
5575	 * Adjust the beacon timers only when pulling them forward
5576	 * or when going back by less than the beacon interval.
5577	 * Negative jumps larger than the beacon interval seem to
5578	 * cause the timers to stop and generally cause instability.
5579	 * This basically filters out jumps due to missed beacons.
5580	 */
5581	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
5582		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
5583		sc->sc_stats.ast_tdma_timers++;
5584	}
5585	if (tsfdelta > 0) {
5586		ath_hal_adjusttsf(ah, tsfdelta);
5587		sc->sc_stats.ast_tdma_tsf++;
5588	}
5589	ath_tdma_beacon_send(sc, vap);		/* prepare response */
5590#undef TU_TO_TSF
5591#undef TSF_TO_TU
5592}
5593
5594/*
5595 * Transmit a beacon frame at SWBA.  Dynamic updates
5596 * to the frame contents are done as needed.
5597 */
5598static void
5599ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
5600{
5601	struct ath_hal *ah = sc->sc_ah;
5602	struct ath_buf *bf;
5603	int otherant;
5604
5605	/*
5606	 * Check if the previous beacon has gone out.  If
5607	 * not don't try to post another, skip this period
5608	 * and wait for the next.  Missed beacons indicate
5609	 * a problem and should not occur.  If we miss too
5610	 * many consecutive beacons reset the device.
5611	 */
5612	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
5613		sc->sc_bmisscount++;
5614		DPRINTF(sc, ATH_DEBUG_BEACON,
5615			"%s: missed %u consecutive beacons\n",
5616			__func__, sc->sc_bmisscount);
5617		if (sc->sc_bmisscount >= ath_bstuck_threshold)
5618			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
5619		return;
5620	}
5621	if (sc->sc_bmisscount != 0) {
5622		DPRINTF(sc, ATH_DEBUG_BEACON,
5623			"%s: resume beacon xmit after %u misses\n",
5624			__func__, sc->sc_bmisscount);
5625		sc->sc_bmisscount = 0;
5626	}
5627
5628	/*
5629	 * Check recent per-antenna transmit statistics and flip
5630	 * the default antenna if noticeably more frames went out
5631	 * on the non-default antenna.
5632	 * XXX assumes 2 anntenae
5633	 */
5634	if (!sc->sc_diversity) {
5635		otherant = sc->sc_defant & 1 ? 2 : 1;
5636		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
5637			ath_setdefantenna(sc, otherant);
5638		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
5639	}
5640
5641	bf = ath_beacon_generate(sc, vap);
5642	if (bf != NULL) {
5643		/*
5644		 * Stop any current dma and put the new frame on the queue.
5645		 * This should never fail since we check above that no frames
5646		 * are still pending on the queue.
5647		 */
5648		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
5649			DPRINTF(sc, ATH_DEBUG_ANY,
5650				"%s: beacon queue %u did not stop?\n",
5651				__func__, sc->sc_bhalq);
5652			/* NB: the HAL still stops DMA, so proceed */
5653		}
5654		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
5655		ath_hal_txstart(ah, sc->sc_bhalq);
5656
5657		sc->sc_stats.ast_be_xmit++;		/* XXX per-vap? */
5658
5659		/*
5660		 * Record local TSF for our last send for use
5661		 * in arbitrating slot collisions.
5662		 */
5663		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
5664	}
5665}
5666#endif /* IEEE80211_SUPPORT_TDMA */
5667
/* Driver module plumbing: module version and the 802.11 stack dependency. */
MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1);          /* 802.11 media layer */
5670