/* if_ath.c revision 190571 */
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer,
10 *    without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 *    redistribution must be conditioned upon including a substantially
14 *    similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 190571 2009-03-30 19:23:49Z sam $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60#include <sys/priv.h>
61
62#include <machine/bus.h>
63
64#include <net/if.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_arp.h>
69#include <net/ethernet.h>
70#include <net/if_llc.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
74#ifdef IEEE80211_SUPPORT_TDMA
75#include <net80211/ieee80211_tdma.h>
76#endif
77
78#include <net/bpf.h>
79
80#ifdef INET
81#include <netinet/in.h>
82#include <netinet/if_ether.h>
83#endif
84
85#include <dev/ath/if_athvar.h>
86#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
87
88#ifdef ATH_TX99_DIAG
89#include <dev/ath/ath_tx99/ath_tx99.h>
90#endif
91
92/*
93 * ATH_BCBUF determines the number of vap's that can transmit
94 * beacons and also (currently) the number of vap's that can
95 * have unique mac addresses/bssid.  When staggering beacons
96 * 4 is probably a good max as otherwise the beacons become
97 * very closely spaced and there is limited time for cab q traffic
98 * to go out.  You can burst beacons instead but that is not good
99 * for stations in power save and at some point you really want
100 * another radio (and channel).
101 *
102 * The limit on the number of mac addresses is tied to our use of
103 * the U/L bit and tracking addresses in a byte; it would be
104 * worthwhile to allow more for applications like proxy sta.
105 */
106CTASSERT(ATH_BCBUF <= 8);
107
/*
 * Unaligned little endian access.
 * Assemble a 16-/32-bit value byte-by-byte so the load is safe on
 * architectures that fault on misaligned access.  NB: the argument
 * is evaluated multiple times, so it must be side-effect free.
 */
#define LE_READ_2(p)							\
	((u_int16_t)							\
	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)							\
	((u_int32_t)							\
	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) |	\
	  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
116
117static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
118		    const char name[IFNAMSIZ], int unit, int opmode,
119		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
120		    const uint8_t mac[IEEE80211_ADDR_LEN]);
121static void	ath_vap_delete(struct ieee80211vap *);
122static void	ath_init(void *);
123static void	ath_stop_locked(struct ifnet *);
124static void	ath_stop(struct ifnet *);
125static void	ath_start(struct ifnet *);
126static int	ath_reset(struct ifnet *);
127static int	ath_reset_vap(struct ieee80211vap *, u_long);
128static int	ath_media_change(struct ifnet *);
129static void	ath_watchdog(void *);
130static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
131static void	ath_fatal_proc(void *, int);
132static void	ath_bmiss_vap(struct ieee80211vap *);
133static void	ath_bmiss_proc(void *, int);
134static int	ath_keyset(struct ath_softc *, const struct ieee80211_key *,
135			struct ieee80211_node *);
136static int	ath_key_alloc(struct ieee80211vap *,
137			struct ieee80211_key *,
138			ieee80211_keyix *, ieee80211_keyix *);
139static int	ath_key_delete(struct ieee80211vap *,
140			const struct ieee80211_key *);
141static int	ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
142			const u_int8_t mac[IEEE80211_ADDR_LEN]);
143static void	ath_key_update_begin(struct ieee80211vap *);
144static void	ath_key_update_end(struct ieee80211vap *);
145static void	ath_update_mcast(struct ifnet *);
146static void	ath_update_promisc(struct ifnet *);
147static void	ath_mode_init(struct ath_softc *);
148static void	ath_setslottime(struct ath_softc *);
149static void	ath_updateslot(struct ifnet *);
150static int	ath_beaconq_setup(struct ath_hal *);
151static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
152static void	ath_beacon_update(struct ieee80211vap *, int item);
153static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
154static void	ath_beacon_proc(void *, int);
155static struct ath_buf *ath_beacon_generate(struct ath_softc *,
156			struct ieee80211vap *);
157static void	ath_bstuck_proc(void *, int);
158static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
159static void	ath_beacon_free(struct ath_softc *);
160static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
161static void	ath_descdma_cleanup(struct ath_softc *sc,
162			struct ath_descdma *, ath_bufhead *);
163static int	ath_desc_alloc(struct ath_softc *);
164static void	ath_desc_free(struct ath_softc *);
165static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
166			const uint8_t [IEEE80211_ADDR_LEN]);
167static void	ath_node_free(struct ieee80211_node *);
168static void	ath_node_getsignal(const struct ieee80211_node *,
169			int8_t *, int8_t *);
170static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
171static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
172			int subtype, int rssi, int noise, u_int32_t rstamp);
173static void	ath_setdefantenna(struct ath_softc *, u_int);
174static void	ath_rx_proc(void *, int);
175static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
176static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
177static int	ath_tx_setup(struct ath_softc *, int, int);
178static int	ath_wme_update(struct ieee80211com *);
179static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
180static void	ath_tx_cleanup(struct ath_softc *);
181static void	ath_freetx(struct mbuf *);
182static int	ath_tx_start(struct ath_softc *, struct ieee80211_node *,
183			     struct ath_buf *, struct mbuf *);
184static void	ath_tx_proc_q0(void *, int);
185static void	ath_tx_proc_q0123(void *, int);
186static void	ath_tx_proc(void *, int);
187static void	ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
188static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
189static void	ath_draintxq(struct ath_softc *);
190static void	ath_stoprecv(struct ath_softc *);
191static int	ath_startrecv(struct ath_softc *);
192static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
193static void	ath_scan_start(struct ieee80211com *);
194static void	ath_scan_end(struct ieee80211com *);
195static void	ath_set_channel(struct ieee80211com *);
196static void	ath_calibrate(void *);
197static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
198static void	ath_setup_stationkey(struct ieee80211_node *);
199static void	ath_newassoc(struct ieee80211_node *, int);
200static int	ath_setregdomain(struct ieee80211com *,
201		    struct ieee80211_regdomain *, int,
202		    struct ieee80211_channel []);
203static void	ath_getradiocaps(struct ieee80211com *, int, int *,
204		    struct ieee80211_channel []);
205static int	ath_getchannels(struct ath_softc *);
206static void	ath_led_event(struct ath_softc *, int);
207
208static int	ath_rate_setup(struct ath_softc *, u_int mode);
209static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
210
211static void	ath_sysctlattach(struct ath_softc *);
212static int	ath_raw_xmit(struct ieee80211_node *,
213			struct mbuf *, const struct ieee80211_bpf_params *);
214static void	ath_bpfattach(struct ath_softc *);
215static void	ath_announce(struct ath_softc *);
216
217#ifdef IEEE80211_SUPPORT_TDMA
218static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
219		    u_int32_t bintval);
220static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
221		    const struct ieee80211_tdma_state *tdma);
222static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
223static void	ath_tdma_update(struct ieee80211_node *ni,
224		    const struct ieee80211_tdma_param *tdma, int);
225static void	ath_tdma_beacon_send(struct ath_softc *sc,
226		    struct ieee80211vap *vap);
227
/*
 * Enable/disable Clear Channel Assessment (CCA).
 * Deliberately a no-op stub: see the NB below for why no default
 * implementation is supplied.  The 'ena' argument is ignored.
 */
static __inline void
ath_hal_setcca(struct ath_hal *ah, int ena)
{
	/*
	 * NB: fill me in; this is not provided by default because disabling
	 *     CCA in most locales violates regulatory.
	 */
}
236
237static __inline int
238ath_hal_getcca(struct ath_hal *ah)
239{
240	u_int32_t diag;
241	if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
242		return 1;
243	return ((diag & 0x500000) == 0);
244}
245
/* fixed-point helpers for TDMA slot timing averages */
#define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
#define	TDMA_LPF_LEN		6
#define	TDMA_DUMMY_MARKER	0x127	/* sentinel: filter not yet seeded */
/* scale a sample into the extended-precision (fixed point) domain */
#define	TDMA_EP_MUL(x, mul)	((x) * (mul))
#define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
/* single-pole low-pass filter; passes y through unfiltered on first sample */
#define	TDMA_LPF(x, y, len) \
    ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
#define	TDMA_SAMPLE(x, y) do {					\
	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
} while (0)
/* round a fixed-point value back to the integer domain */
#define	TDMA_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
259#endif /* IEEE80211_SUPPORT_TDMA */
260
261SYSCTL_DECL(_hw_ath);
262
263/* XXX validate sysctl values */
264static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
265SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
266	    0, "long chip calibration interval (secs)");
267static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
268SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
269	    0, "short chip calibration interval (msecs)");
270static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
271SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
272	    0, "reset chip calibration results (secs)");
273
274static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
275SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
276	    0, "rx buffers allocated");
277TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
278static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
279SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
280	    0, "tx buffers allocated");
281TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
282
283static	int ath_bstuck_threshold = 4;		/* max missed beacons */
284SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
285	    0, "max missed beacon xmits before chip reset");
286
287#ifdef ATH_DEBUG
288enum {
289	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
290	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
291	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
292	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
293	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
294	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
295	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
296	ATH_DEBUG_BEACON 	= 0x00000080,	/* beacon handling */
297	ATH_DEBUG_WATCHDOG 	= 0x00000100,	/* watchdog timeout */
298	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
299	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
300	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
301	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
302	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
303	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
304	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
305	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
306	ATH_DEBUG_LED		= 0x00100000,	/* led management */
307	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
308	ATH_DEBUG_DFS		= 0x00400000,	/* DFS processing */
309	ATH_DEBUG_TDMA		= 0x00800000,	/* TDMA processing */
310	ATH_DEBUG_TDMA_TIMER	= 0x01000000,	/* TDMA timer processing */
311	ATH_DEBUG_REGDOMAIN	= 0x02000000,	/* regulatory processing */
312	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
313	ATH_DEBUG_ANY		= 0xffffffff
314};
315static	int ath_debug = 0;
316SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
317	    0, "control debugging printfs");
318TUNABLE_INT("hw.ath.debug", &ath_debug);
319
320#define	IFF_DUMPPKTS(sc, m) \
321	((sc->sc_debug & (m)) || \
322	    (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
323#define	DPRINTF(sc, m, fmt, ...) do {				\
324	if (sc->sc_debug & (m))					\
325		printf(fmt, __VA_ARGS__);			\
326} while (0)
327#define	KEYPRINTF(sc, ix, hk, mac) do {				\
328	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
329		ath_keyprint(sc, __func__, ix, hk, mac);	\
330} while (0)
331static	void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
332	u_int ix, int);
333static	void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
334	u_int qnum, u_int ix, int done);
335#else
336#define	IFF_DUMPPKTS(sc, m) \
337	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
338#define	DPRINTF(sc, m, fmt, ...) do {				\
339	(void) sc;						\
340} while (0)
341#define	KEYPRINTF(sc, k, ix, mac) do {				\
342	(void) sc;						\
343} while (0)
344#endif
345
346MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
347
/*
 * Attach the device: attach the HAL, size and reset the key cache,
 * build channel and rate tables, allocate tx/rx descriptors and
 * hardware tx queues, attach rate control, configure the ifnet and
 * net80211 state/callbacks, then announce the device.
 *
 * Returns 0 on success or an errno on failure; on failure all
 * partially-allocated state is unwound and sc->sc_invalid is set so
 * interrupt handling ignores the (dead) device.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles reseting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the anntena state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	/* wire up the ifnet entry points */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = NULL;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif
	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	ath_bpfattach(sc);
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
	/* error unwind: release tx queues + descriptors, then hal + ifnet */
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	/* mark invalid so the ISR and other entry points bail out */
	sc->sc_invalid = 1;
	return error;
}
737
/*
 * Detach the device, releasing everything ath_attach acquired.
 * Always returns 0.  The teardown ordering below is load-bearing;
 * see the NB comment for the dependencies between steps.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the bpf tap now that we know nothing will use
	 *   it (e.g. rx processing from the task q thread)
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
	bpfdetach(ifp);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
778
779/*
780 * MAC address handling for multiple BSS on the same radio.
781 * The first vap uses the MAC address from the EEPROM.  For
782 * subsequent vap's we set the U/L bit (bit 1) in the MAC
783 * address and use the next six bits as an index.
784 */
/*
 * Hand out a MAC address for a new vap.  If 'clone' is set and the
 * hardware supports multiple bssid, pick the first free slot from
 * sc_bssidmask and fold the slot index plus the U/L bit into mac[0];
 * otherwise (or when slot 0 is free) the caller-supplied base address
 * is used unchanged.  Slot 0 users are reference counted in
 * sc_nbssid0; the hw bssid mask is narrowed to cover the new address.
 *
 * NOTE(review): if all 8 slots are taken the loop exits with i == 8,
 * which writes (8<<2)|0x2 into mac[0] and sets bit 8 of sc_bssidmask
 * with no exhaustion check -- presumably callers limit the number of
 * vaps before this can happen; TODO confirm.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	/* narrow the hw mask so rx matches all addresses in use */
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}
804
805static void
806reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
807{
808	int i = mac[0] >> 2;
809	uint8_t mask;
810
811	if (i != 0 || --sc->sc_nbssid0 == 0) {
812		sc->sc_bssidmask &= ~(1<<i);
813		/* recalculate bssid mask from remaining addresses */
814		mask = 0xff;
815		for (i = 1; i < 8; i++)
816			if (sc->sc_bssidmask & (1<<i))
817				mask &= ~((i<<2)|0x2);
818		sc->sc_hwbssidmask[0] |= mask;
819	}
820}
821
822/*
823 * Assign a beacon xmit slot.  We try to space out
824 * assignments so when beacons are staggered the
825 * traffic coming out of the cab q has maximal time
826 * to go out before the next beacon is scheduled.
827 */
828static int
829assign_bslot(struct ath_softc *sc)
830{
831	u_int slot, free;
832
833	free = 0;
834	for (slot = 0; slot < ATH_BCBUF; slot++)
835		if (sc->sc_bslot[slot] == NULL) {
836			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
837			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
838				return slot;
839			free = slot;
840			/* NB: keep looking for a double slot */
841		}
842	return free;
843}
844
845static struct ieee80211vap *
846ath_vap_create(struct ieee80211com *ic,
847	const char name[IFNAMSIZ], int unit, int opmode, int flags,
848	const uint8_t bssid[IEEE80211_ADDR_LEN],
849	const uint8_t mac0[IEEE80211_ADDR_LEN])
850{
851	struct ath_softc *sc = ic->ic_ifp->if_softc;
852	struct ath_vap *avp;
853	struct ieee80211vap *vap;
854	uint8_t mac[IEEE80211_ADDR_LEN];
855	int ic_opmode, needbeacon, error;
856
857	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
858	    M_80211_VAP, M_WAITOK | M_ZERO);
859	needbeacon = 0;
860	IEEE80211_ADDR_COPY(mac, mac0);
861
862	ATH_LOCK(sc);
863	switch (opmode) {
864	case IEEE80211_M_STA:
865		if (sc->sc_nstavaps != 0) {	/* XXX only 1 sta for now */
866			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
867			goto bad;
868		}
869		if (sc->sc_nvaps) {
870			/*
871			 * When there are multiple vaps we must fall
872			 * back to s/w beacon miss handling.
873			 */
874			flags |= IEEE80211_CLONE_NOBEACONS;
875		}
876		if (flags & IEEE80211_CLONE_NOBEACONS)
877			ic_opmode = IEEE80211_M_HOSTAP;
878		else
879			ic_opmode = opmode;
880		break;
881	case IEEE80211_M_IBSS:
882		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
883			device_printf(sc->sc_dev,
884			    "only 1 ibss vap supported\n");
885			goto bad;
886		}
887		ic_opmode = opmode;
888		needbeacon = 1;
889		break;
890	case IEEE80211_M_AHDEMO:
891#ifdef IEEE80211_SUPPORT_TDMA
892		if (flags & IEEE80211_CLONE_TDMA) {
893			needbeacon = 1;
894			flags |= IEEE80211_CLONE_NOBEACONS;
895		}
896		/* fall thru... */
897#endif
898	case IEEE80211_M_MONITOR:
899		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
900			/* XXX not right for monitor mode */
901			ic_opmode = ic->ic_opmode;
902		} else
903			ic_opmode = opmode;
904		break;
905	case IEEE80211_M_HOSTAP:
906		needbeacon = 1;
907		/* fall thru... */
908	case IEEE80211_M_WDS:
909		if (sc->sc_nvaps && ic->ic_opmode == IEEE80211_M_STA) {
910			device_printf(sc->sc_dev,
911			    "wds not supported in sta mode\n");
912			goto bad;
913		}
914		if (opmode == IEEE80211_M_WDS) {
915			/*
916			 * Silently remove any request for a unique
917			 * bssid; WDS vap's always share the local
918			 * mac address.
919			 */
920			flags &= ~IEEE80211_CLONE_BSSID;
921		}
922		ic_opmode = IEEE80211_M_HOSTAP;
923		break;
924	default:
925		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
926		goto bad;
927	}
928	/*
929	 * Check that a beacon buffer is available; the code below assumes it.
930	 */
931	if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
932		device_printf(sc->sc_dev, "no beacon buffer available\n");
933		goto bad;
934	}
935
936	/* STA, AHDEMO? */
937	if (opmode == IEEE80211_M_HOSTAP) {
938		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
939		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
940	}
941
942	vap = &avp->av_vap;
943	/* XXX can't hold mutex across if_alloc */
944	ATH_UNLOCK(sc);
945	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
946	    bssid, mac);
947	ATH_LOCK(sc);
948	if (error != 0) {
949		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
950		    __func__, error);
951		goto bad2;
952	}
953
954	/* h/w crypto support */
955	vap->iv_key_alloc = ath_key_alloc;
956	vap->iv_key_delete = ath_key_delete;
957	vap->iv_key_set = ath_key_set;
958	vap->iv_key_update_begin = ath_key_update_begin;
959	vap->iv_key_update_end = ath_key_update_end;
960
961	/* override various methods */
962	avp->av_recv_mgmt = vap->iv_recv_mgmt;
963	vap->iv_recv_mgmt = ath_recv_mgmt;
964	vap->iv_reset = ath_reset_vap;
965	vap->iv_update_beacon = ath_beacon_update;
966	avp->av_newstate = vap->iv_newstate;
967	vap->iv_newstate = ath_newstate;
968	avp->av_bmiss = vap->iv_bmiss;
969	vap->iv_bmiss = ath_bmiss_vap;
970
971	avp->av_bslot = -1;
972	if (needbeacon) {
973		/*
974		 * Allocate beacon state and setup the q for buffered
975		 * multicast frames.  We know a beacon buffer is
976		 * available because we checked above.
977		 */
978		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
979		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
980		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
981			/*
982			 * Assign the vap to a beacon xmit slot.  As above
983			 * this cannot fail to find a free one.
984			 */
985			avp->av_bslot = assign_bslot(sc);
986			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
987			    ("beacon slot %u not empty", avp->av_bslot));
988			sc->sc_bslot[avp->av_bslot] = vap;
989			sc->sc_nbcnvaps++;
990		}
991		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
992			/*
993			 * Multple vaps are to transmit beacons and we
994			 * have h/w support for TSF adjusting; enable
995			 * use of staggered beacons.
996			 */
997			sc->sc_stagbeacons = 1;
998		}
999		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1000	}
1001
1002	ic->ic_opmode = ic_opmode;
1003	if (opmode != IEEE80211_M_WDS) {
1004		sc->sc_nvaps++;
1005		if (opmode == IEEE80211_M_STA)
1006			sc->sc_nstavaps++;
1007	}
1008	switch (ic_opmode) {
1009	case IEEE80211_M_IBSS:
1010		sc->sc_opmode = HAL_M_IBSS;
1011		break;
1012	case IEEE80211_M_STA:
1013		sc->sc_opmode = HAL_M_STA;
1014		break;
1015	case IEEE80211_M_AHDEMO:
1016#ifdef IEEE80211_SUPPORT_TDMA
1017		if (vap->iv_caps & IEEE80211_C_TDMA) {
1018			sc->sc_tdma = 1;
1019			/* NB: disable tsf adjust */
1020			sc->sc_stagbeacons = 0;
1021		}
1022		/*
1023		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1024		 * just ap mode.
1025		 */
1026		/* fall thru... */
1027#endif
1028	case IEEE80211_M_HOSTAP:
1029		sc->sc_opmode = HAL_M_HOSTAP;
1030		break;
1031	case IEEE80211_M_MONITOR:
1032		sc->sc_opmode = HAL_M_MONITOR;
1033		break;
1034	default:
1035		/* XXX should not happen */
1036		break;
1037	}
1038	if (sc->sc_hastsfadd) {
1039		/*
1040		 * Configure whether or not TSF adjust should be done.
1041		 */
1042		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1043	}
1044	if (flags & IEEE80211_CLONE_NOBEACONS) {
1045		/*
1046		 * Enable s/w beacon miss handling.
1047		 */
1048		sc->sc_swbmiss = 1;
1049	}
1050	ATH_UNLOCK(sc);
1051
1052	/* complete setup */
1053	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1054	return vap;
1055bad2:
1056	reclaim_address(sc, mac);
1057	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1058bad:
1059	free(avp, M_80211_VAP);
1060	ATH_UNLOCK(sc);
1061	return NULL;
1062}
1063
/*
 * Tear down a vap: quiesce the hardware, detach the net80211 state,
 * reclaim the vap's beacon slot/buffer and multicast queue, fix up
 * driver accounting and then restart rx/tx if other vaps remain up.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			/*
			 * No beaconing vaps remain: stop staggering and
			 * turn off h/w TSF adjustment if it was in use.
			 */
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* last sta vap: s/w beacon miss handling no longer needed */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
		/* return the vap's bssid and recompute the h/w bssid mask */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
1145
1146void
1147ath_suspend(struct ath_softc *sc)
1148{
1149	struct ifnet *ifp = sc->sc_ifp;
1150	struct ieee80211com *ic = ifp->if_l2com;
1151
1152	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1153		__func__, ifp->if_flags);
1154
1155	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1156	if (ic->ic_opmode == IEEE80211_M_STA)
1157		ath_stop(ifp);
1158	else
1159		ieee80211_suspend_all(ic);
1160	/*
1161	 * NB: don't worry about putting the chip in low power
1162	 * mode; pci will power off our socket on suspend and
1163	 * cardbus detaches the device.
1164	 */
1165}
1166
1167/*
1168 * Reset the key cache since some parts do not reset the
1169 * contents on resume.  First we clear all entries, then
1170 * re-load keys that the 802.11 layer assumes are setup
1171 * in h/w.
1172 */
1173static void
1174ath_reset_keycache(struct ath_softc *sc)
1175{
1176	struct ifnet *ifp = sc->sc_ifp;
1177	struct ieee80211com *ic = ifp->if_l2com;
1178	struct ath_hal *ah = sc->sc_ah;
1179	int i;
1180
1181	for (i = 0; i < sc->sc_keymax; i++)
1182		ath_hal_keyreset(ah, i);
1183	ieee80211_crypto_reload_keys(ic);
1184}
1185
/*
 * Power the device back up after a suspend: full chip reset,
 * key cache reload, then restart whatever was running before.
 */
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);
	if (sc->sc_resume_up) {
		/* interface was up at suspend time; bring it back */
		if (ic->ic_opmode == IEEE80211_M_STA) {
			ath_init(sc);
			/* nudge the sta state machine to re-sync with the ap */
			ieee80211_beacon_miss(ic);
		} else
			ieee80211_resume_all(ic);
	}
	if (sc->sc_softled) {
		/* restore s/w LED gpio config lost across the power cycle */
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}
}
1218
1219void
1220ath_shutdown(struct ath_softc *sc)
1221{
1222	struct ifnet *ifp = sc->sc_ifp;
1223
1224	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1225		__func__, ifp->if_flags);
1226
1227	ath_stop(ifp);
1228	/* NB: no point powering down chip as we're about to reboot */
1229}
1230
1231/*
1232 * Interrupt handler.  Most of the actual processing is deferred.
1233 */
1234void
1235ath_intr(void *arg)
1236{
1237	struct ath_softc *sc = arg;
1238	struct ifnet *ifp = sc->sc_ifp;
1239	struct ath_hal *ah = sc->sc_ah;
1240	HAL_INT status;
1241
1242	if (sc->sc_invalid) {
1243		/*
1244		 * The hardware is not ready/present, don't touch anything.
1245		 * Note this can happen early on if the IRQ is shared.
1246		 */
1247		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1248		return;
1249	}
1250	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
1251		return;
1252	if ((ifp->if_flags & IFF_UP) == 0 ||
1253	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1254		HAL_INT status;
1255
1256		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1257			__func__, ifp->if_flags);
1258		ath_hal_getisr(ah, &status);	/* clear ISR */
1259		ath_hal_intrset(ah, 0);		/* disable further intr's */
1260		return;
1261	}
1262	/*
1263	 * Figure out the reason(s) for the interrupt.  Note
1264	 * that the hal returns a pseudo-ISR that may include
1265	 * bits we haven't explicitly enabled so we mask the
1266	 * value to insure we only process bits we requested.
1267	 */
1268	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
1269	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1270	status &= sc->sc_imask;			/* discard unasked for bits */
1271	if (status & HAL_INT_FATAL) {
1272		sc->sc_stats.ast_hardware++;
1273		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
1274		ath_fatal_proc(sc, 0);
1275	} else {
1276		if (status & HAL_INT_SWBA) {
1277			/*
1278			 * Software beacon alert--time to send a beacon.
1279			 * Handle beacon transmission directly; deferring
1280			 * this is too slow to meet timing constraints
1281			 * under load.
1282			 */
1283#ifdef IEEE80211_SUPPORT_TDMA
1284			if (sc->sc_tdma) {
1285				if (sc->sc_tdmaswba == 0) {
1286					struct ieee80211com *ic = ifp->if_l2com;
1287					struct ieee80211vap *vap =
1288					    TAILQ_FIRST(&ic->ic_vaps);
1289					ath_tdma_beacon_send(sc, vap);
1290					sc->sc_tdmaswba =
1291					    vap->iv_tdma->tdma_bintval;
1292				} else
1293					sc->sc_tdmaswba--;
1294			} else
1295#endif
1296				ath_beacon_proc(sc, 0);
1297		}
1298		if (status & HAL_INT_RXEOL) {
1299			/*
1300			 * NB: the hardware should re-read the link when
1301			 *     RXE bit is written, but it doesn't work at
1302			 *     least on older hardware revs.
1303			 */
1304			sc->sc_stats.ast_rxeol++;
1305			sc->sc_rxlink = NULL;
1306		}
1307		if (status & HAL_INT_TXURN) {
1308			sc->sc_stats.ast_txurn++;
1309			/* bump tx trigger level */
1310			ath_hal_updatetxtriglevel(ah, AH_TRUE);
1311		}
1312		if (status & HAL_INT_RX)
1313			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1314		if (status & HAL_INT_TX)
1315			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1316		if (status & HAL_INT_BMISS) {
1317			sc->sc_stats.ast_bmiss++;
1318			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1319		}
1320		if (status & HAL_INT_MIB) {
1321			sc->sc_stats.ast_mib++;
1322			/*
1323			 * Disable interrupts until we service the MIB
1324			 * interrupt; otherwise it will continue to fire.
1325			 */
1326			ath_hal_intrset(ah, 0);
1327			/*
1328			 * Let the hal handle the event.  We assume it will
1329			 * clear whatever condition caused the interrupt.
1330			 */
1331			ath_hal_mibevent(ah, &sc->sc_halstats);
1332			ath_hal_intrset(ah, sc->sc_imask);
1333		}
1334		if (status & HAL_INT_RXORN) {
1335			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1336			sc->sc_stats.ast_rxorn++;
1337		}
1338	}
1339}
1340
/*
 * Deferred handler for fatal (typically DMA) errors: dump any
 * h/w diagnostic state the hal can provide, then reset the chip.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		/* the hal returns at least 6 diagnostic words */
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1] , state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp);
}
1365
/*
 * Per-vap beacon-miss handler installed over net80211's iv_bmiss.
 * Filters phantom h/w beacon-miss interrupts before handing the
 * event to the saved net80211 handler.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/*
		 * Miss window in TSF units (usecs); ni_intval is in TU
		 * (1024 usec), hence the *1024.
		 * NOTE(review): assumes vap->iv_bss is non-NULL here --
		 * confirm bmiss cannot fire before a bss node exists.
		 */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			/* heard a frame recently; treat miss as phantom */
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}
1398
1399static int
1400ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1401{
1402	uint32_t rsize;
1403	void *sp;
1404
1405	if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(&mask), &sp, &rsize))
1406		return 0;
1407	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1408	*hangs = *(uint32_t *)sp;
1409	return 1;
1410}
1411
/*
 * Deferred beacon-miss handler.  Consult the hal's hang diagnostics
 * first; a detected baseband hang is cured with a chip reset,
 * otherwise the miss is passed up to net80211 for processing.
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	/* NOTE(review): "reseting" in the message below is misspelled */
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		if_printf(ifp, "bb hang detected (0x%x), reseting\n", hangs);
		ath_reset(ifp);
	} else
		ieee80211_beacon_miss(ifp->if_l2com);
}
1427
1428/*
1429 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1430 * calcs together with WME.  If necessary disable the crypto
1431 * hardware and mark the 802.11 state so keys will be setup
1432 * with the MIC work done in software.
1433 */
1434static void
1435ath_settkipmic(struct ath_softc *sc)
1436{
1437	struct ifnet *ifp = sc->sc_ifp;
1438	struct ieee80211com *ic = ifp->if_l2com;
1439
1440	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1441		if (ic->ic_flags & IEEE80211_F_WME) {
1442			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1443			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1444		} else {
1445			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1446			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1447		}
1448	}
1449}
1450
/*
 * Bring the interface up: stop any prior activity, reset the
 * hardware, restart the receive engine and enable interrupts,
 * then kick all vaps into operation.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* restart calibration bookkeeping from scratch */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* arm the watchdog before interrupts can fire */
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1534
/*
 * Stop the interface; the ATH lock must be held.  Work that
 * touches the hardware is skipped when the device has been
 * marked invalid (e.g. detached/gone).
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				/* stop LED blinking and force LED off */
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1586
/*
 * Locked wrapper around ath_stop_locked.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}
1596
1597/*
1598 * Reset the hardware w/o losing operational state.  This is
1599 * basically a more efficient way of doing ath_stop, ath_init,
1600 * followed by state transitions to the current 802.11
1601 * operational state.  Used to recover from various errors and
1602 * to reset or reload hardware state.
1603 */
1604static int
1605ath_reset(struct ifnet *ifp)
1606{
1607	struct ath_softc *sc = ifp->if_softc;
1608	struct ieee80211com *ic = ifp->if_l2com;
1609	struct ath_hal *ah = sc->sc_ah;
1610	HAL_STATUS status;
1611
1612	ath_hal_intrset(ah, 0);		/* disable interrupts */
1613	ath_draintxq(sc);		/* stop xmit side */
1614	ath_stoprecv(sc);		/* stop recv side */
1615	ath_settkipmic(sc);		/* configure TKIP MIC handling */
1616	/* NB: indicate channel change so we do a full reset */
1617	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
1618		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1619			__func__, status);
1620	sc->sc_diversity = ath_hal_getdiversity(ah);
1621	if (ath_startrecv(sc) != 0)	/* restart recv */
1622		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1623	/*
1624	 * We may be doing a reset in response to an ioctl
1625	 * that changes the channel so update any state that
1626	 * might change as a result.
1627	 */
1628	ath_chan_change(sc, ic->ic_curchan);
1629	if (sc->sc_beacons) {
1630#ifdef IEEE80211_SUPPORT_TDMA
1631		if (sc->sc_tdma)
1632			ath_tdma_config(sc, NULL);
1633		else
1634#endif
1635			ath_beacon_config(sc, NULL);	/* restart beacons */
1636	}
1637	ath_hal_intrset(ah, sc->sc_imask);
1638
1639	ath_start(ifp);			/* restart xmit */
1640	return 0;
1641}
1642
1643static int
1644ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1645{
1646	struct ieee80211com *ic = vap->iv_ic;
1647	struct ifnet *ifp = ic->ic_ifp;
1648	struct ath_softc *sc = ifp->if_softc;
1649	struct ath_hal *ah = sc->sc_ah;
1650
1651	switch (cmd) {
1652	case IEEE80211_IOC_TXPOWER:
1653		/*
1654		 * If per-packet TPC is enabled, then we have nothing
1655		 * to do; otherwise we need to force the global limit.
1656		 * All this can happen directly; no need to reset.
1657		 */
1658		if (!ath_hal_gettpc(ah))
1659			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1660		return 0;
1661	}
1662	return ath_reset(ifp);
1663}
1664
/*
 * FF flush predicate that never terminates the scan; used to
 * make ath_ff_stageq_flush drain the entire staging queue.
 */
static int
ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
{
	return 0;
}
1670
#if 0
/*
 * Currently unused FF flush predicate: stop flushing once entries
 * are younger than ATH_FF_STAGEMAX ticks.
 */
static int
ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
{
	return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
}
#endif
1678
1679/*
1680 * Flush FF staging queue.
1681 */
1682static void
1683ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
1684	int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
1685{
1686	struct ath_buf *bf;
1687	struct ieee80211_node *ni;
1688	int pktlen, pri;
1689
1690	for (;;) {
1691		ATH_TXQ_LOCK(txq);
1692		/*
1693		 * Go from the back (oldest) to front so we can
1694		 * stop early based on the age of the entry.
1695		 */
1696		bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
1697		if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
1698			ATH_TXQ_UNLOCK(txq);
1699			break;
1700		}
1701
1702		ni = bf->bf_node;
1703		pri = M_WME_GETAC(bf->bf_m);
1704		KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
1705			("no bf on staging queue %p", bf));
1706		ATH_NODE(ni)->an_ff_buf[pri] = NULL;
1707		TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
1708
1709		ATH_TXQ_UNLOCK(txq);
1710
1711		DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
1712			__func__, bf->bf_age);
1713
1714		sc->sc_stats.ast_ff_flush++;
1715
1716		/* encap and xmit */
1717		bf->bf_m = ieee80211_encap(ni, bf->bf_m);
1718		if (bf->bf_m == NULL) {
1719			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1720				"%s: discard, encapsulation failure\n",
1721				__func__);
1722			sc->sc_stats.ast_tx_encap++;
1723			goto bad;
1724		}
1725		pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
1726		if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
1727#if 0 /*XXX*/
1728			ifp->if_opackets++;
1729#endif
1730			continue;
1731		}
1732	bad:
1733		if (ni != NULL)
1734			ieee80211_free_node(ni);
1735		bf->bf_node = NULL;
1736		if (bf->bf_m != NULL) {
1737			m_freem(bf->bf_m);
1738			bf->bf_m = NULL;
1739		}
1740
1741		ATH_TXBUF_LOCK(sc);
1742		STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1743		ATH_TXBUF_UNLOCK(sc);
1744	}
1745}
1746
/*
 * Estimate the airtime (usecs) of a frame if combined with any
 * frame currently staged for the same node/AC, using the last
 * data rate as the reference rate.
 */
static __inline u_int32_t
ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	u_int32_t framelen;
	struct ath_buf *bf;

	/*
	 * Approximate the frame length to be transmitted. A swag to add
	 * the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
	framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
	if (ic->ic_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	/* include any frame already staged for this node/AC */
	bf = an->an_ff_buf[M_WME_GETAC(m)];
	if (bf != NULL)
		framelen += bf->bf_m->m_pkthdr.len;
	return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
			sc->sc_lastdatarix, AH_FALSE);
}
1772
1773/*
1774 * Determine if a data frame may be aggregated via ff tunnelling.
1775 * Note the caller is responsible for checking if the destination
1776 * supports fast frames.
1777 *
1778 *  NB: allowing EAPOL frames to be aggregated with other unicast traffic.
1779 *      Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
1780 *      be aggregated with other types of frames when encryption is on?
1781 *
1782 *  NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
1783 */
1784static __inline int
1785ath_ff_can_aggregate(struct ath_softc *sc,
1786	struct ath_node *an, struct mbuf *m, int *flushq)
1787{
1788	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1789	struct ath_txq *txq;
1790	u_int32_t txoplimit;
1791	u_int pri;
1792
1793	*flushq = 0;
1794
1795	/*
1796	 * If there is no frame to combine with and the txq has
1797	 * fewer frames than the minimum required; then do not
1798	 * attempt to aggregate this frame.
1799	 */
1800	pri = M_WME_GETAC(m);
1801	txq = sc->sc_ac2q[pri];
1802	if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
1803		return 0;
1804	/*
1805	 * When not in station mode never aggregate a multicast
1806	 * frame; this insures, for example, that a combined frame
1807	 * does not require multiple encryption keys when using
1808	 * 802.1x/WPA.
1809	 */
1810	if (ic->ic_opmode != IEEE80211_M_STA &&
1811	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
1812		return 0;
1813	/*
1814	 * Consult the max bursting interval to insure a combined
1815	 * frame fits within the TxOp window.
1816	 */
1817	txoplimit = IEEE80211_TXOP_TO_US(
1818		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
1819	if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
1820		DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1821			"%s: FF TxOp violation\n", __func__);
1822		if (an->an_ff_buf[pri] != NULL)
1823			*flushq = 1;
1824		return 0;
1825	}
1826	return 1;		/* try to aggregate */
1827}
1828
1829/*
1830 * Check if the supplied frame can be partnered with an existing
1831 * or pending frame.  Return a reference to any frame that should be
1832 * sent on return; otherwise return NULL.
1833 */
1834static struct mbuf *
1835ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
1836	struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
1837{
1838	struct ath_node *an = ATH_NODE(ni);
1839	struct ath_buf *bfstaged;
1840	int ff_flush, pri;
1841
1842	/*
1843	 * Check if the supplied frame can be aggregated.
1844	 *
1845	 * NB: we use the txq lock to protect references to
1846	 *     an->an_ff_txbuf in ath_ff_can_aggregate().
1847	 */
1848	ATH_TXQ_LOCK(txq);
1849	pri = M_WME_GETAC(m);
1850	if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
1851		struct ath_buf *bfstaged = an->an_ff_buf[pri];
1852		if (bfstaged != NULL) {
1853			/*
1854			 * A frame is available for partnering; remove
1855			 * it, chain it to this one, and encapsulate.
1856			 */
1857			an->an_ff_buf[pri] = NULL;
1858			TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1859			ATH_TXQ_UNLOCK(txq);
1860
1861			/*
1862			 * Chain mbufs and add FF magic.
1863			 */
1864			DPRINTF(sc, ATH_DEBUG_FF,
1865				"[%s] aggregate fast-frame, age %u\n",
1866				ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1867			m->m_nextpkt = NULL;
1868			bfstaged->bf_m->m_nextpkt = m;
1869			m = bfstaged->bf_m;
1870			bfstaged->bf_m = NULL;
1871			m->m_flags |= M_FF;
1872			/*
1873			 * Release the node reference held while
1874			 * the packet sat on an_ff_buf[]
1875			 */
1876			bfstaged->bf_node = NULL;
1877			ieee80211_free_node(ni);
1878
1879			/*
1880			 * Return bfstaged to the free list.
1881			 */
1882			ATH_TXBUF_LOCK(sc);
1883			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1884			ATH_TXBUF_UNLOCK(sc);
1885
1886			return m;		/* ready to go */
1887		} else {
1888			/*
1889			 * No frame available, queue this frame to wait
1890			 * for a partner.  Note that we hold the buffer
1891			 * and a reference to the node; we need the
1892			 * buffer in particular so we're certain we
1893			 * can flush the frame at a later time.
1894			 */
1895			DPRINTF(sc, ATH_DEBUG_FF,
1896				"[%s] stage fast-frame, age %u\n",
1897				ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1898
1899			bf->bf_m = m;
1900			bf->bf_node = ni;	/* NB: held reference */
1901			bf->bf_age = txq->axq_curage;
1902			an->an_ff_buf[pri] = bf;
1903			TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
1904			ATH_TXQ_UNLOCK(txq);
1905
1906			return NULL;		/* consumed */
1907		}
1908	}
1909	/*
1910	 * Frame could not be aggregated, it needs to be returned
1911	 * to the caller for immediate transmission.  In addition
1912	 * we check if we should first flush a frame from the
1913	 * staging queue before sending this one.
1914	 *
1915	 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
1916	 *     is present to flush.
1917	 */
1918	if (ff_flush) {
1919		int pktlen;
1920
1921		bfstaged = an->an_ff_buf[pri];
1922		an->an_ff_buf[pri] = NULL;
1923		TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1924		ATH_TXQ_UNLOCK(txq);
1925
1926		DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
1927			ether_sprintf(an->an_node.ni_macaddr));
1928
1929		/* encap and xmit */
1930		bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
1931		if (bfstaged->bf_m == NULL) {
1932			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1933				"%s: discard, encap failure\n", __func__);
1934			sc->sc_stats.ast_tx_encap++;
1935			goto ff_flushbad;
1936		}
1937		pktlen = bfstaged->bf_m->m_pkthdr.len;
1938		if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
1939			DPRINTF(sc, ATH_DEBUG_XMIT,
1940				"%s: discard, xmit failure\n", __func__);
1941	ff_flushbad:
1942			/*
1943			 * Unable to transmit frame that was on the staging
1944			 * queue.  Reclaim the node reference and other
1945			 * resources.
1946			 */
1947			if (ni != NULL)
1948				ieee80211_free_node(ni);
1949			bfstaged->bf_node = NULL;
1950			if (bfstaged->bf_m != NULL) {
1951				m_freem(bfstaged->bf_m);
1952				bfstaged->bf_m = NULL;
1953			}
1954
1955			ATH_TXBUF_LOCK(sc);
1956			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1957			ATH_TXBUF_UNLOCK(sc);
1958		} else {
1959#if 0
1960			ifp->if_opackets++;
1961#endif
1962		}
1963	} else {
1964		if (an->an_ff_buf[pri] != NULL) {
1965			/*
1966			 * XXX: out-of-order condition only occurs for AP
1967			 * mode and multicast.  There may be no valid way
1968			 * to get this condition.
1969			 */
1970			DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
1971				ether_sprintf(an->an_node.ni_macaddr));
1972			/* XXX stat */
1973		}
1974		ATH_TXQ_UNLOCK(txq);
1975	}
1976	return m;
1977}
1978
1979static struct ath_buf *
1980_ath_getbuf_locked(struct ath_softc *sc)
1981{
1982	struct ath_buf *bf;
1983
1984	ATH_TXBUF_LOCK_ASSERT(sc);
1985
1986	bf = STAILQ_FIRST(&sc->sc_txbuf);
1987	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1988		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1989	else
1990		bf = NULL;
1991	if (bf == NULL) {
1992		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1993		    STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1994			"out of xmit buffers" : "xmit buffer busy");
1995		sc->sc_stats.ast_tx_nobuf++;
1996	}
1997	return bf;
1998}
1999
2000static struct ath_buf *
2001ath_getbuf(struct ath_softc *sc)
2002{
2003	struct ath_buf *bf;
2004
2005	ATH_TXBUF_LOCK(sc);
2006	bf = _ath_getbuf_locked(sc);
2007	if (bf == NULL) {
2008		struct ifnet *ifp = sc->sc_ifp;
2009
2010		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2011		sc->sc_stats.ast_tx_qstop++;
2012		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2013	}
2014	ATH_TXBUF_UNLOCK(sc);
2015	return bf;
2016}
2017
2018/*
2019 * Cleanup driver resources when we run out of buffers
2020 * while processing fragments; return the tx buffers
2021 * allocated and drop node references.
2022 */
2023static void
2024ath_txfrag_cleanup(struct ath_softc *sc,
2025	ath_bufhead *frags, struct ieee80211_node *ni)
2026{
2027	struct ath_buf *bf, *next;
2028
2029	ATH_TXBUF_LOCK_ASSERT(sc);
2030
2031	STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
2032		/* NB: bf assumed clean */
2033		STAILQ_REMOVE_HEAD(frags, bf_list);
2034		STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2035		ieee80211_node_decref(ni);
2036	}
2037}
2038
2039/*
2040 * Setup xmit of a fragmented frame.  Allocate a buffer
2041 * for each frag and bump the node reference count to
2042 * reflect the held reference to be setup by ath_tx_start.
2043 */
2044static int
2045ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
2046	struct mbuf *m0, struct ieee80211_node *ni)
2047{
2048	struct mbuf *m;
2049	struct ath_buf *bf;
2050
2051	ATH_TXBUF_LOCK(sc);
2052	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
2053		bf = _ath_getbuf_locked(sc);
2054		if (bf == NULL) {	/* out of buffers, cleanup */
2055			ath_txfrag_cleanup(sc, frags, ni);
2056			break;
2057		}
2058		ieee80211_node_incref(ni);
2059		STAILQ_INSERT_TAIL(frags, bf, bf_list);
2060	}
2061	ATH_TXBUF_UNLOCK(sc);
2062
2063	return !STAILQ_EMPTY(frags);
2064}
2065
/*
 * Transmit dispatch: drain the interface output queue, pairing
 * each packet with a tx buffer, optionally staging it for
 * fast-frame aggregation, encapsulating it, and handing it to
 * the hardware.  Runs until the queue empties or tx buffers are
 * exhausted.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	struct ath_txq *txq;
	ath_bufhead frags;
	int pri;

	/* Nothing to do while the interface is down or the h/w is gone. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Output queue drained; return the unused buffer. */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		STAILQ_INIT(&frags);
		/* NB: rcvif carries the node reference for this frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		pri = M_WME_GETAC(m);
		txq = sc->sc_ac2q[pri];
		if (IEEE80211_ATH_CAP(ni->ni_vap, ni, IEEE80211_NODE_FF)) {
			/*
			 * Check queue length; if too deep drop this
			 * frame (tail drop considered good).
			 */
			if (txq->axq_depth >= sc->sc_fftxqmax) {
				DPRINTF(sc, ATH_DEBUG_FF,
				    "[%s] tail drop on q %u depth %u\n",
				    ether_sprintf(ni->ni_macaddr),
				    txq->axq_qnum, txq->axq_depth);
				sc->sc_stats.ast_tx_qfull++;
				m_freem(m);
				goto reclaim;
			}
			/*
			 * May stage the frame (returns NULL, consumed) or
			 * hand back an aggregate/passthrough mbuf to send.
			 */
			m = ath_ff_check(sc, txq, bf, m, ni);
			if (m == NULL) {
				/* NB: ni ref & bf held on stageq */
				continue;
			}
		}
		ifp->if_opackets++;
		/*
		 * Encapsulate the packet in prep for transmission.
		 */
		m = ieee80211_encap(ni, m);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: encapsulation failure\n", __func__);
			sc->sc_stats.ast_tx_encap++;
			goto bad;
		}
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ath_freetx(m);
			goto bad;
		}
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/*
			 * Return the buffer (and any pre-allocated frag
			 * buffers) to the free list and drop the node ref.
			 */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		sc->sc_wd_timer = 5;	/* re-arm the tx watchdog timer */
#if 0
		/*
		 * Flush stale frames from the fast-frame staging queue.
		 */
		if (ic->ic_opmode != IEEE80211_M_STA)
			ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
#endif
	}
}
2201
2202static int
2203ath_media_change(struct ifnet *ifp)
2204{
2205	int error = ieee80211_media_change(ifp);
2206	/* NB: only the fixed rate can change and that doesn't need a reset */
2207	return (error == ENETRESET ? 0 : error);
2208}
2209
2210#ifdef ATH_DEBUG
2211static void
2212ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
2213	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2214{
2215	static const char *ciphers[] = {
2216		"WEP",
2217		"AES-OCB",
2218		"AES-CCM",
2219		"CKIP",
2220		"TKIP",
2221		"CLR",
2222	};
2223	int i, n;
2224
2225	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
2226	for (i = 0, n = hk->kv_len; i < n; i++)
2227		printf("%02x", hk->kv_val[i]);
2228	printf(" mac %s", ether_sprintf(mac));
2229	if (hk->kv_type == HAL_CIPHER_TKIP) {
2230		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
2231		for (i = 0; i < sizeof(hk->kv_mic); i++)
2232			printf("%02x", hk->kv_mic[i]);
2233		if (!sc->sc_splitmic) {
2234			printf(" txmic ");
2235			for (i = 0; i < sizeof(hk->kv_txmic); i++)
2236				printf("%02x", hk->kv_txmic[i]);
2237		}
2238	}
2239	printf("\n");
2240}
2241#endif
2242
2243/*
2244 * Set a TKIP key into the hardware.  This handles the
2245 * potential distribution of key state to multiple key
2246 * cache slots for TKIP.
2247 */
/*
 * Three cases, selected by the key's XMIT/RECV flags:
 *  - XMIT+RECV: with split-mic hardware two cache entries are
 *    used (tx state at wk_keyix, rx state at wk_keyix+32);
 *    otherwise one entry carries both MIC keys.
 *  - XMIT only or RECV only: a single entry with the matching
 *    MIC key placed where the hardware expects it.
 * Returns non-zero on success (the ath_hal_keyset result).
 */
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The hal handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the hal
			 * will handle the rest.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XMIT) {
		if (sc->sc_splitmic) {
			/*
			 * NB: must pass MIC key in expected location when
			 * the keycache only holds one MIC key per entry.
			 * NOTE(review): the copy length is sizeof(hk->kv_txmic)
			 * though the destination is kv_mic — presumably the
			 * fields are the same size; confirm in HAL_KEYVAL.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
		} else
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	} else if (k->wk_flags & IEEE80211_KEY_RECV) {
		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* neither XMIT nor RECV set: nothing to program */
	return 0;
#undef IEEE80211_KEY_XR
}
2303
2304/*
2305 * Set a net80211 key into the hardware.  This handles the
2306 * potential distribution of key state to multiple key
2307 * cache slots for TKIP with hardware MIC support.
2308 */
2309static int
2310ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
2311	struct ieee80211_node *bss)
2312{
2313#define	N(a)	(sizeof(a)/sizeof(a[0]))
2314	static const u_int8_t ciphermap[] = {
2315		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
2316		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
2317		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
2318		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
2319		(u_int8_t) -1,		/* 4 is not allocated */
2320		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
2321		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
2322	};
2323	struct ath_hal *ah = sc->sc_ah;
2324	const struct ieee80211_cipher *cip = k->wk_cipher;
2325	u_int8_t gmac[IEEE80211_ADDR_LEN];
2326	const u_int8_t *mac;
2327	HAL_KEYVAL hk;
2328
2329	memset(&hk, 0, sizeof(hk));
2330	/*
2331	 * Software crypto uses a "clear key" so non-crypto
2332	 * state kept in the key cache are maintained and
2333	 * so that rx frames have an entry to match.
2334	 */
2335	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2336		KASSERT(cip->ic_cipher < N(ciphermap),
2337			("invalid cipher type %u", cip->ic_cipher));
2338		hk.kv_type = ciphermap[cip->ic_cipher];
2339		hk.kv_len = k->wk_keylen;
2340		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2341	} else
2342		hk.kv_type = HAL_CIPHER_CLR;
2343
2344	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2345		/*
2346		 * Group keys on hardware that supports multicast frame
2347		 * key search use a mac that is the sender's address with
2348		 * the high bit set instead of the app-specified address.
2349		 */
2350		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2351		gmac[0] |= 0x80;
2352		mac = gmac;
2353	} else
2354		mac = k->wk_macaddr;
2355
2356	if (hk.kv_type == HAL_CIPHER_TKIP &&
2357	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2358		return ath_keyset_tkip(sc, k, &hk, mac);
2359	} else {
2360		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2361		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2362	}
2363#undef N
2364}
2365
2366/*
2367 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2368 * each key, one for decrypt/encrypt and the other for the MIC.
2369 */
/*
 * Find a free key cache slot whose three companion slots
 * (+32 rx key, +64 tx MIC, +32+64 rx MIC) are also free, mark
 * all four allocated, and return tx/rx indices through the out
 * parameters.  Returns 1 on success, 0 when no suitable slot
 * exists.  Only used when the hardware needs split tx/rx MIC
 * keys (sc_splitmic).
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/*
	 * NB: scan only the low quarter of the keymap; candidate
	 * slots need companions at +32, +64 and +32+64, which
	 * restricts TKIP keys to the first 64 entries (see the
	 * ath_key_alloc comment).
	 */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: jumps back into the bit scan above
				 * to step past this unusable slot */
				goto again;
			}
			/* claim all four slots */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			*txkeyix = keyix;
			*rxkeyix = keyix+32;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}
2420
2421/*
2422 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2423 * each key, one for decrypt/encrypt and the other for the MIC.
2424 */
/*
 * Find a free key cache slot whose MIC companion slot (+64) is
 * also free, mark both allocated, and return the (shared) tx/rx
 * index.  Returns 1 on success, 0 when no suitable slot exists.
 * Used when the hardware holds both MIC keys in a single entry
 * (!sc_splitmic).
 */
static u_int16_t
key_alloc_pair(struct ath_softc *sc,
	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(!sc->sc_splitmic, ("key cache split"));
	/* XXX could optimize */
	/* NB: low quarter only; the MIC companion lives at +64 so
	 * TKIP keys are limited to the first 64 entries */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			if (isset(sc->sc_keymap, keyix+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: jumps back into the bit scan above
				 * to step past this unusable slot */
				goto again;
			}
			/* claim the slot and its MIC companion */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u\n",
				__func__, keyix, keyix+64);
			*txkeyix = *rxkeyix = keyix;
			return 1;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return 0;
#undef N
}
2468
2469/*
2470 * Allocate a single key cache slot.
2471 */
2472static int
2473key_alloc_single(struct ath_softc *sc,
2474	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2475{
2476#define	N(a)	(sizeof(a)/sizeof(a[0]))
2477	u_int i, keyix;
2478
2479	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2480	for (i = 0; i < N(sc->sc_keymap); i++) {
2481		u_int8_t b = sc->sc_keymap[i];
2482		if (b != 0xff) {
2483			/*
2484			 * One or more slots are free.
2485			 */
2486			keyix = i*NBBY;
2487			while (b & 1)
2488				keyix++, b >>= 1;
2489			setbit(sc->sc_keymap, keyix);
2490			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2491				__func__, keyix);
2492			*txkeyix = *rxkeyix = keyix;
2493			return 1;
2494		}
2495	}
2496	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2497	return 0;
2498#undef N
2499}
2500
2501/*
 * Allocate one or more key cache slots for a unicast key.  The
2503 * key itself is needed only to identify the cipher.  For hardware
2504 * TKIP with split cipher+MIC keys we allocate two key cache slot
2505 * pairs so that we can setup separate TX and RX MIC keys.  Note
2506 * that the MIC key for a TKIP key at slot i is assumed by the
2507 * hardware to be at slot i+64.  This limits TKIP keys to the first
2508 * 64 entries.
2509 */
2510static int
2511ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
2512	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2513{
2514	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2515
2516	/*
2517	 * Group key allocation must be handled specially for
2518	 * parts that do not support multicast key cache search
2519	 * functionality.  For those parts the key id must match
2520	 * the h/w key index so lookups find the right key.  On
2521	 * parts w/ the key search facility we install the sender's
2522	 * mac address (with the high bit set) and let the hardware
2523	 * find the key w/o using the key id.  This is preferred as
2524	 * it permits us to support multiple users for adhoc and/or
2525	 * multi-station operation.
2526	 */
2527	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||	/* global key */
2528	    ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
2529		if (!(&vap->iv_nw_keys[0] <= k &&
2530		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
2531			/* should not happen */
2532			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2533				"%s: bogus group key\n", __func__);
2534			return 0;
2535		}
2536		/*
2537		 * XXX we pre-allocate the global keys so
2538		 * have no way to check if they've already been allocated.
2539		 */
2540		*keyix = *rxkeyix = k - vap->iv_nw_keys;
2541		return 1;
2542	}
2543
2544	/*
2545	 * We allocate two pair for TKIP when using the h/w to do
2546	 * the MIC.  For everything else, including software crypto,
2547	 * we allocate a single entry.  Note that s/w crypto requires
2548	 * a pass-through slot on the 5211 and 5212.  The 5210 does
2549	 * not support pass-through cache entries and we map all
2550	 * those requests to slot 0.
2551	 */
2552	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2553		return key_alloc_single(sc, keyix, rxkeyix);
2554	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2555	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2556		if (sc->sc_splitmic)
2557			return key_alloc_2pair(sc, keyix, rxkeyix);
2558		else
2559			return key_alloc_pair(sc, keyix, rxkeyix);
2560	} else {
2561		return key_alloc_single(sc, keyix, rxkeyix);
2562	}
2563}
2564
2565/*
2566 * Delete an entry in the key cache allocated by ath_key_alloc.
2567 */
2568static int
2569ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
2570{
2571	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2572	struct ath_hal *ah = sc->sc_ah;
2573	const struct ieee80211_cipher *cip = k->wk_cipher;
2574	u_int keyix = k->wk_keyix;
2575
2576	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2577
2578	ath_hal_keyreset(ah, keyix);
2579	/*
2580	 * Handle split tx/rx keying required for TKIP with h/w MIC.
2581	 */
2582	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2583	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2584		ath_hal_keyreset(ah, keyix+32);		/* RX key */
2585	if (keyix >= IEEE80211_WEP_NKID) {
2586		/*
2587		 * Don't touch keymap entries for global keys so
2588		 * they are never considered for dynamic allocation.
2589		 */
2590		clrbit(sc->sc_keymap, keyix);
2591		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2592		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2593			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
2594			if (sc->sc_splitmic) {
2595				/* +32 for RX key, +32+64 for RX key MIC */
2596				clrbit(sc->sc_keymap, keyix+32);
2597				clrbit(sc->sc_keymap, keyix+32+64);
2598			}
2599		}
2600	}
2601	return 1;
2602}
2603
2604/*
2605 * Set the key cache contents for the specified key.  Key cache
2606 * slot(s) must already have been allocated by ath_key_alloc.
2607 */
2608static int
2609ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2610	const u_int8_t mac[IEEE80211_ADDR_LEN])
2611{
2612	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2613
2614	return ath_keyset(sc, k, vap->iv_bss);
2615}
2616
2617/*
2618 * Block/unblock tx+rx processing while a key change is done.
2619 * We assume the caller serializes key management operations
2620 * so we only need to worry about synchronization with other
2621 * uses that originate in the driver.
2622 */
2623static void
2624ath_key_update_begin(struct ieee80211vap *vap)
2625{
2626	struct ifnet *ifp = vap->iv_ic->ic_ifp;
2627	struct ath_softc *sc = ifp->if_softc;
2628
2629	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2630	taskqueue_block(sc->sc_tq);
2631	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
2632}
2633
2634static void
2635ath_key_update_end(struct ieee80211vap *vap)
2636{
2637	struct ifnet *ifp = vap->iv_ic->ic_ifp;
2638	struct ath_softc *sc = ifp->if_softc;
2639
2640	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2641	IF_UNLOCK(&ifp->if_snd);
2642	taskqueue_unblock(sc->sc_tq);
2643}
2644
2645/*
2646 * Calculate the receive filter according to the
2647 * operating mode and state:
2648 *
2649 * o always accept unicast, broadcast, and multicast traffic
2650 * o accept PHY error frames when hardware doesn't have MIB support
2651 *   to count and we need them for ANI (sta mode only until recently)
2652 *   and we are not scanning (ANI is disabled)
2653 *   NB: older hal's add rx filter bits out of sight and we need to
2654 *	 blindly preserve them
2655 * o probe request frames are accepted only when operating in
2656 *   hostap, adhoc, or monitor modes
2657 * o enable promiscuous mode
2658 *   - when in monitor mode
2659 *   - if interface marked PROMISC (assumes bridge setting is filtered)
2660 * o accept beacons:
2661 *   - when operating in station mode for collecting rssi data when
2662 *     the station is otherwise quiet, or
2663 *   - when operating in adhoc mode so the 802.11 layer creates
2664 *     node table entries for peers,
2665 *   - when scanning
2666 *   - when doing s/w beacon miss (e.g. for ap+sta)
2667 *   - when operating in ap mode in 11g to detect overlapping bss that
2668 *     require protection
2669 * o accept control frames:
2670 *   - when in monitor mode
2671 * XXX BAR frames for 11n
2672 * XXX HT protection for 11n
2673 */
2674static u_int32_t
2675ath_calcrxfilter(struct ath_softc *sc)
2676{
2677	struct ifnet *ifp = sc->sc_ifp;
2678	struct ieee80211com *ic = ifp->if_l2com;
2679	u_int32_t rfilt;
2680
2681	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2682	if (!sc->sc_needmib && !sc->sc_scanning)
2683		rfilt |= HAL_RX_FILTER_PHYERR;
2684	if (ic->ic_opmode != IEEE80211_M_STA)
2685		rfilt |= HAL_RX_FILTER_PROBEREQ;
2686	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2687		rfilt |= HAL_RX_FILTER_PROM;
2688	if (ic->ic_opmode == IEEE80211_M_STA ||
2689	    ic->ic_opmode == IEEE80211_M_IBSS ||
2690	    sc->sc_swbmiss || sc->sc_scanning)
2691		rfilt |= HAL_RX_FILTER_BEACON;
2692	/*
2693	 * NB: We don't recalculate the rx filter when
2694	 * ic_protmode changes; otherwise we could do
2695	 * this only when ic_protmode != NONE.
2696	 */
2697	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2698	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2699		rfilt |= HAL_RX_FILTER_BEACON;
2700	if (ic->ic_opmode == IEEE80211_M_MONITOR)
2701		rfilt |= HAL_RX_FILTER_CONTROL;
2702	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2703	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2704	return rfilt;
2705}
2706
2707static void
2708ath_update_promisc(struct ifnet *ifp)
2709{
2710	struct ath_softc *sc = ifp->if_softc;
2711	u_int32_t rfilt;
2712
2713	/* configure rx filter */
2714	rfilt = ath_calcrxfilter(sc);
2715	ath_hal_setrxfilter(sc->sc_ah, rfilt);
2716
2717	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2718}
2719
2720static void
2721ath_update_mcast(struct ifnet *ifp)
2722{
2723	struct ath_softc *sc = ifp->if_softc;
2724	u_int32_t mfilt[2];
2725
2726	/* calculate and install multicast filter */
2727	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2728		struct ifmultiaddr *ifma;
2729		/*
2730		 * Merge multicast addresses to form the hardware filter.
2731		 */
2732		mfilt[0] = mfilt[1] = 0;
2733		IF_ADDR_LOCK(ifp);	/* XXX need some fiddling to remove? */
2734		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2735			caddr_t dl;
2736			u_int32_t val;
2737			u_int8_t pos;
2738
2739			/* calculate XOR of eight 6bit values */
2740			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2741			val = LE_READ_4(dl + 0);
2742			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2743			val = LE_READ_4(dl + 3);
2744			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2745			pos &= 0x3f;
2746			mfilt[pos / 32] |= (1 << (pos % 32));
2747		}
2748		IF_ADDR_UNLOCK(ifp);
2749	} else
2750		mfilt[0] = mfilt[1] = ~0;
2751	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2752	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2753		__func__, mfilt[0], mfilt[1]);
2754}
2755
2756static void
2757ath_mode_init(struct ath_softc *sc)
2758{
2759	struct ifnet *ifp = sc->sc_ifp;
2760	struct ath_hal *ah = sc->sc_ah;
2761	u_int32_t rfilt;
2762
2763	/* configure rx filter */
2764	rfilt = ath_calcrxfilter(sc);
2765	ath_hal_setrxfilter(ah, rfilt);
2766
2767	/* configure operational mode */
2768	ath_hal_setopmode(ah);
2769
2770	/* handle any link-level address change */
2771	ath_hal_setmac(ah, IF_LLADDR(ifp));
2772
2773	/* calculate and install multicast filter */
2774	ath_update_mcast(ifp);
2775}
2776
2777/*
2778 * Set the slot time based on the current setting.
2779 */
2780static void
2781ath_setslottime(struct ath_softc *sc)
2782{
2783	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2784	struct ath_hal *ah = sc->sc_ah;
2785	u_int usec;
2786
2787	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2788		usec = 13;
2789	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2790		usec = 21;
2791	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2792		/* honor short/long slot time only in 11g */
2793		/* XXX shouldn't honor on pure g or turbo g channel */
2794		if (ic->ic_flags & IEEE80211_F_SHSLOT)
2795			usec = HAL_SLOT_TIME_9;
2796		else
2797			usec = HAL_SLOT_TIME_20;
2798	} else
2799		usec = HAL_SLOT_TIME_9;
2800
2801	DPRINTF(sc, ATH_DEBUG_RESET,
2802	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2803	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2804	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2805
2806	ath_hal_setslottime(ah, usec);
2807	sc->sc_updateslot = OK;
2808}
2809
2810/*
2811 * Callback from the 802.11 layer to update the
2812 * slot time based on the current setting.
2813 */
2814static void
2815ath_updateslot(struct ifnet *ifp)
2816{
2817	struct ath_softc *sc = ifp->if_softc;
2818	struct ieee80211com *ic = ifp->if_l2com;
2819
2820	/*
2821	 * When not coordinating the BSS, change the hardware
2822	 * immediately.  For other operation we defer the change
2823	 * until beacon updates have propagated to the stations.
2824	 */
2825	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2826		sc->sc_updateslot = UPDATE;
2827	else
2828		ath_setslottime(sc);
2829}
2830
2831/*
2832 * Setup a h/w transmit queue for beacons.
2833 */
2834static int
2835ath_beaconq_setup(struct ath_hal *ah)
2836{
2837	HAL_TXQ_INFO qi;
2838
2839	memset(&qi, 0, sizeof(qi));
2840	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2841	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2842	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2843	/* NB: for dynamic turbo, don't enable any other interrupts */
2844	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2845	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2846}
2847
2848/*
2849 * Setup the transmit queue parameters for the beacon queue.
2850 */
2851static int
2852ath_beaconq_config(struct ath_softc *sc)
2853{
2854#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
2855	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2856	struct ath_hal *ah = sc->sc_ah;
2857	HAL_TXQ_INFO qi;
2858
2859	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2860	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2861		/*
2862		 * Always burst out beacon and CAB traffic.
2863		 */
2864		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2865		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2866		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2867	} else {
2868		struct wmeParams *wmep =
2869			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2870		/*
2871		 * Adhoc mode; important thing is to use 2x cwmin.
2872		 */
2873		qi.tqi_aifs = wmep->wmep_aifsn;
2874		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2875		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2876	}
2877
2878	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2879		device_printf(sc->sc_dev, "unable to update parameters for "
2880			"beacon hardware queue!\n");
2881		return 0;
2882	} else {
2883		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2884		return 1;
2885	}
2886#undef ATH_EXPONENT_TO_VALUE
2887}
2888
2889/*
2890 * Allocate and setup an initial beacon frame.
2891 */
/*
 * Build the beacon frame for the vap that owns node ni and
 * attach it (DMA-mapped) to the vap's beacon buffer.  Any
 * previous beacon mbuf/node reference is released first.
 * Returns 0 on success or an errno on allocation/mapping
 * failure.
 */
static int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	/* release any beacon state left from a previous call */
	bf = avp->av_bcbuf;
	if (bf->bf_m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}
	if (bf->bf_node != NULL) {
		ieee80211_free_node(bf->bf_node);
		bf->bf_node = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the mbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 */
	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
	if (m == NULL) {
		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
		sc->sc_stats.ast_be_nombuf++;
		return ENOMEM;
	}
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
		    __func__, error);
		m_freem(m);
		return error;
	}

	/*
	 * Calculate a TSF adjustment factor required for staggered
	 * beacons.  Note that we assume the format of the beacon
	 * frame leaves the tstamp field immediately following the
	 * header.
	 */
	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
		uint64_t tsfadjust;
		struct ieee80211_frame *wh;

		/*
		 * The beacon interval is in TU's; the TSF is in usecs.
		 * We figure out how many TU's to add to align the timestamp
		 * then convert to TSF units and handle byte swapping before
		 * inserting it in the frame.  The hardware will then add this
		 * each time a beacon frame is sent.  Note that we align vap's
		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
		 * timestamp in one beacon interval while the others get a
		 * timestamp aligned to the next interval.
		 */
		tsfadjust = ni->ni_intval *
		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
		    avp->av_bslot, ni->ni_intval,
		    (long long unsigned) le64toh(tsfadjust));

		/* write the adjustment into the frame's tstamp field,
		 * which directly follows the 802.11 header */
		wh = mtod(m, struct ieee80211_frame *);
		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
	}
	bf->bf_m = m;
	bf->bf_node = ieee80211_ref_node(ni);	/* held until next alloc/free */

	return 0;
}
2972
2973/*
2974 * Setup the beacon frame for transmit.
2975 */
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define	USE_SHPREAMBLE(_ic) \
	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
		== IEEE80211_F_SHPREAMBLE)
	struct ieee80211_node *ni = bf->bf_node;
	struct ieee80211com *ic = ni->ni_ic;
	struct mbuf *m = bf->bf_m;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const HAL_RATE_TABLE *rt;
	u_int8_t rix, rate;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
		__func__, m, m->m_len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* Beacons are never acknowledged. */
	flags = HAL_TXDESC_NOACK;
	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
		/*
		 * With VEOL the descriptor links to itself so the
		 * hardware retransmits the beacon without s/w help.
		 */
		ds->ds_link = bf->bf_daddr;	/* self-linked */
		flags |= HAL_TXDESC_VEOL;
		/*
		 * Let hardware handle antenna switching.
		 */
		antenna = sc->sc_txantenna;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every 4 beacons.
		 * XXX assumes two antenna
		 */
		if (sc->sc_txantenna != 0)
			antenna = sc->sc_txantenna;
		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
		else
			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
	}

	KASSERT(bf->bf_nseg == 1,
		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
	ds->ds_data = bf->bf_segs[0].ds_addr;
	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (USE_SHPREAMBLE(ic))
		rate |= rt->info[rix].shortPreamble;
	ath_hal_setuptxdesc(ah, ds
		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
		, sizeof(struct ieee80211_frame)/* header length */
		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
		, ni->ni_txpower		/* txpower XXX */
		, rate, 1			/* series 0 rate/tries */
		, HAL_TXKEYIX_INVALID		/* no encryption */
		, antenna			/* antenna mode */
		, flags				/* no ack, veol for beacons */
		, 0				/* rts/cts rate */
		, 0				/* rts/cts duration */
	);
	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath_hal_filltxdesc(ah, ds
		, roundup(m->m_len, 4)		/* buffer length */
		, AH_TRUE			/* first segment */
		, AH_TRUE			/* last segment */
		, ds				/* first descriptor */
	);
#if 0
	ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}
3055
3056static void
3057ath_beacon_update(struct ieee80211vap *vap, int item)
3058{
3059	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
3060
3061	setbit(bo->bo_flags, item);
3062}
3063
3064/*
3065 * Append the contents of src to dst; both queues
3066 * are assumed to be locked.
3067 */
3068static void
3069ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3070{
3071	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
3072	dst->axq_link = src->axq_link;
3073	src->axq_link = NULL;
3074	dst->axq_depth += src->axq_depth;
3075	src->axq_depth = 0;
3076}
3077
3078/*
3079 * Transmit a beacon frame at SWBA.  Dynamic updates to the
3080 * frame contents are done as needed and the slot time is
3081 * also adjusted based on current state.
3082 */
static void
ath_beacon_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap;
	struct ath_buf *bf;
	int slot, otherant;
	uint32_t bfaddr;

	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
		__func__, pending);
	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	if (sc->sc_stagbeacons) {			/* staggered beacons */
		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
		uint32_t tsftu;

		tsftu = ath_hal_gettsf32(ah) >> 10;	/* usecs -> TU */
		/* XXX lintval */
		/*
		 * Map the current TSF position within the beacon interval
		 * to a slot index; the vap in the NEXT slot is prepared
		 * since SWBA fires ahead of the target TBTT — presumably;
		 * TODO confirm timing assumption.
		 */
		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
		bfaddr = 0;
		if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
			bf = ath_beacon_generate(sc, vap);
			if (bf != NULL)
				bfaddr = bf->bf_daddr;
		}
	} else {					/* burst'd beacons */
		uint32_t *bflink = &bfaddr;

		/*
		 * Chain all running vaps' beacon frames together via the
		 * descriptor link fields so they go out back-to-back.
		 */
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			vap = sc->sc_bslot[slot];
			if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
				bf = ath_beacon_generate(sc, vap);
				if (bf != NULL) {
					*bflink = bf->bf_daddr;
					bflink = &bf->bf_desc->ds_link;
				}
			}
		}
		*bflink = 0;				/* terminate list */
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 */
	/* XXX locking */
	if (sc->sc_updateslot == UPDATE) {
		sc->sc_updateslot = COMMIT;	/* commit next beacon */
		sc->sc_slotupdate = slot;
	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
		ath_setslottime(sc);		/* commit change to h/w */

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 anntenae
	 */
	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	if (bfaddr != 0) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
		}
		/* NB: cabq traffic should already be queued and primed */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;
	}
}
3193
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state == IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return NULL;
		}
	}
	/*
	 * bo_tim[4] is the TIM bitmap-control octet; bit 0 set means
	 * mcast/bcast traffic is buffered for this DTIM beacon.
	 */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		/*
		 * NOTE(review): locks are taken cabq then mcastq but
		 * released cabq then mcastq (not reverse order); legal
		 * for mutexes but worth confirming intent.
		 */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
	}
	return bf;
}
3282
/*
 * Load and start the (self-linked, VEOL) beacon frame for an
 * adhoc vap.  The caller is expected to have stopped beacon
 * queue dma already.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			return;
		}
	}
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}
3322
3323/*
3324 * Reset the hardware after detecting beacons have stopped.
3325 */
3326static void
3327ath_bstuck_proc(void *arg, int pending)
3328{
3329	struct ath_softc *sc = arg;
3330	struct ifnet *ifp = sc->sc_ifp;
3331
3332	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3333		sc->sc_bmisscount);
3334	sc->sc_stats.ast_bstuck++;
3335	ath_reset(ifp);
3336}
3337
3338/*
3339 * Reclaim beacon resources and return buffer to the pool.
3340 */
3341static void
3342ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3343{
3344
3345	if (bf->bf_m != NULL) {
3346		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3347		m_freem(bf->bf_m);
3348		bf->bf_m = NULL;
3349	}
3350	if (bf->bf_node != NULL) {
3351		ieee80211_free_node(bf->bf_node);
3352		bf->bf_node = NULL;
3353	}
3354	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3355}
3356
3357/*
3358 * Reclaim beacon resources.
3359 */
3360static void
3361ath_beacon_free(struct ath_softc *sc)
3362{
3363	struct ath_buf *bf;
3364
3365	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3366		if (bf->bf_m != NULL) {
3367			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3368			m_freem(bf->bf_m);
3369			bf->bf_m = NULL;
3370		}
3371		if (bf->bf_node != NULL) {
3372			ieee80211_free_node(bf->bf_node);
3373			bf->bf_node = NULL;
3374		}
3375	}
3376}
3377
3378/*
3379 * Configure the beacon and sleep timers.
3380 *
3381 * When operating as an AP this resets the TSF and sets
3382 * up the hardware to notify us when we need to issue beacons.
3383 *
3384 * When operating in station mode this sets up the beacon
3385 * timers according to the timestamp of the last received
3386 * beacon and the current TSF, configures PCF and DTIM
3387 * handling, programs the sleep registers so the hardware
3388 * will wakeup in time to receive beacons, and configures
3389 * the beacon miss handling so we'll receive a BMISS
3390 * interrupt when we stop seeing beacons from the AP
3391 * we've associated with.
3392 */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	/* Convert a split 64-bit TSF (usecs) to TU (1024 usec units). */
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
	/* Slop (in TU) added to the current TSF when advancing nexttbtt. */
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = vap->iv_bss;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * For multi-bss ap support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;	/* staggered -> per-slot interval */
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		/* Station mode with hardware beacon-miss support. */
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* Program the timers with interrupts blocked. */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	sc->sc_syncbeacon = 0;
#undef FUDGE
#undef TSF_TO_TU
}
3578
3579static void
3580ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3581{
3582	bus_addr_t *paddr = (bus_addr_t*) arg;
3583	KASSERT(error == 0, ("error %u on bus_dma callback", error));
3584	*paddr = segs->ds_addr;
3585}
3586
3587static int
3588ath_descdma_setup(struct ath_softc *sc,
3589	struct ath_descdma *dd, ath_bufhead *head,
3590	const char *name, int nbuf, int ndesc)
3591{
3592#define	DS2PHYS(_dd, _ds) \
3593	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3594	struct ifnet *ifp = sc->sc_ifp;
3595	struct ath_desc *ds;
3596	struct ath_buf *bf;
3597	int i, bsize, error;
3598
3599	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3600	    __func__, name, nbuf, ndesc);
3601
3602	dd->dd_name = name;
3603	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3604
3605	/*
3606	 * Setup DMA descriptor area.
3607	 */
3608	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
3609		       PAGE_SIZE, 0,		/* alignment, bounds */
3610		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
3611		       BUS_SPACE_MAXADDR,	/* highaddr */
3612		       NULL, NULL,		/* filter, filterarg */
3613		       dd->dd_desc_len,		/* maxsize */
3614		       1,			/* nsegments */
3615		       dd->dd_desc_len,		/* maxsegsize */
3616		       BUS_DMA_ALLOCNOW,	/* flags */
3617		       NULL,			/* lockfunc */
3618		       NULL,			/* lockarg */
3619		       &dd->dd_dmat);
3620	if (error != 0) {
3621		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3622		return error;
3623	}
3624
3625	/* allocate descriptors */
3626	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3627	if (error != 0) {
3628		if_printf(ifp, "unable to create dmamap for %s descriptors, "
3629			"error %u\n", dd->dd_name, error);
3630		goto fail0;
3631	}
3632
3633	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3634				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3635				 &dd->dd_dmamap);
3636	if (error != 0) {
3637		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3638			"error %u\n", nbuf * ndesc, dd->dd_name, error);
3639		goto fail1;
3640	}
3641
3642	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3643				dd->dd_desc, dd->dd_desc_len,
3644				ath_load_cb, &dd->dd_desc_paddr,
3645				BUS_DMA_NOWAIT);
3646	if (error != 0) {
3647		if_printf(ifp, "unable to map %s descriptors, error %u\n",
3648			dd->dd_name, error);
3649		goto fail2;
3650	}
3651
3652	ds = dd->dd_desc;
3653	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3654	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3655	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3656
3657	/* allocate rx buffers */
3658	bsize = sizeof(struct ath_buf) * nbuf;
3659	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3660	if (bf == NULL) {
3661		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3662			dd->dd_name, bsize);
3663		goto fail3;
3664	}
3665	dd->dd_bufptr = bf;
3666
3667	STAILQ_INIT(head);
3668	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3669		bf->bf_desc = ds;
3670		bf->bf_daddr = DS2PHYS(dd, ds);
3671		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3672				&bf->bf_dmamap);
3673		if (error != 0) {
3674			if_printf(ifp, "unable to create dmamap for %s "
3675				"buffer %u, error %u\n", dd->dd_name, i, error);
3676			ath_descdma_cleanup(sc, dd, head);
3677			return error;
3678		}
3679		STAILQ_INSERT_TAIL(head, bf, bf_list);
3680	}
3681	return 0;
3682fail3:
3683	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3684fail2:
3685	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3686fail1:
3687	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3688fail0:
3689	bus_dma_tag_destroy(dd->dd_dmat);
3690	memset(dd, 0, sizeof(*dd));
3691	return error;
3692#undef DS2PHYS
3693}
3694
3695static void
3696ath_descdma_cleanup(struct ath_softc *sc,
3697	struct ath_descdma *dd, ath_bufhead *head)
3698{
3699	struct ath_buf *bf;
3700	struct ieee80211_node *ni;
3701
3702	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3703	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3704	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3705	bus_dma_tag_destroy(dd->dd_dmat);
3706
3707	STAILQ_FOREACH(bf, head, bf_list) {
3708		if (bf->bf_m) {
3709			m_freem(bf->bf_m);
3710			bf->bf_m = NULL;
3711		}
3712		if (bf->bf_dmamap != NULL) {
3713			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3714			bf->bf_dmamap = NULL;
3715		}
3716		ni = bf->bf_node;
3717		bf->bf_node = NULL;
3718		if (ni != NULL) {
3719			/*
3720			 * Reclaim node reference.
3721			 */
3722			ieee80211_free_node(ni);
3723		}
3724	}
3725
3726	STAILQ_INIT(head);
3727	free(dd->dd_bufptr, M_ATHDEV);
3728	memset(dd, 0, sizeof(*dd));
3729}
3730
3731static int
3732ath_desc_alloc(struct ath_softc *sc)
3733{
3734	int error;
3735
3736	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3737			"rx", ath_rxbuf, 1);
3738	if (error != 0)
3739		return error;
3740
3741	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3742			"tx", ath_txbuf, ATH_TXDESC);
3743	if (error != 0) {
3744		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3745		return error;
3746	}
3747
3748	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3749			"beacon", ATH_BCBUF, 1);
3750	if (error != 0) {
3751		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3752		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3753		return error;
3754	}
3755	return 0;
3756}
3757
/*
 * Free the descriptor areas allocated by ath_desc_alloc.
 * A zero dd_desc_len marks an area that was never set up
 * (or was already cleaned up), so partial setups are safe.
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}
3769
3770static struct ieee80211_node *
3771ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3772{
3773	struct ieee80211com *ic = vap->iv_ic;
3774	struct ath_softc *sc = ic->ic_ifp->if_softc;
3775	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3776	struct ath_node *an;
3777
3778	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3779	if (an == NULL) {
3780		/* XXX stat+msg */
3781		return NULL;
3782	}
3783	ath_rate_node_init(sc, an);
3784
3785	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3786	return &an->an_node;
3787}
3788
3789static void
3790ath_node_free(struct ieee80211_node *ni)
3791{
3792	struct ieee80211com *ic = ni->ni_ic;
3793        struct ath_softc *sc = ic->ic_ifp->if_softc;
3794
3795	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3796
3797	ath_rate_node_cleanup(sc, ATH_NODE(ni));
3798	sc->sc_node_free(ni);
3799}
3800
3801static void
3802ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3803{
3804	struct ieee80211com *ic = ni->ni_ic;
3805	struct ath_softc *sc = ic->ic_ifp->if_softc;
3806	struct ath_hal *ah = sc->sc_ah;
3807
3808	*rssi = ic->ic_node_getrssi(ni);
3809	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3810		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
3811	else
3812		*noise = -95;		/* nominally correct */
3813}
3814
/*
 * (Re)initialize an rx buffer: attach a cluster mbuf if needed,
 * map it for DMA, build a self-linked rx descriptor, and append
 * it to the hardware rx descriptor chain.  Returns 0 or errno.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* Use the whole cluster as the receive buffer. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Fix the previous tail's self-link to point at us. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3887
3888/*
3889 * Extend 15-bit time stamp from rx descriptor to
3890 * a full 64-bit TSF using the specified TSF.
3891 */
/*
 * Extend a 15-bit rx-descriptor time stamp to a full 64-bit TSF
 * by splicing it into the supplied TSF snapshot.  If the TSF's
 * low 15 bits have already wrapped past the stamp, the frame
 * belongs to the previous 15-bit epoch, so back up one epoch
 * before combining.
 */
static __inline u_int64_t
ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
{
	u_int64_t hi = tsf & ~(u_int64_t)0x7fff;	/* upper 49 bits */

	if ((tsf & 0x7fff) < rstamp)
		hi -= 0x8000;		/* stamp is from the prior epoch */
	return (hi | rstamp);
}
3899
3900/*
3901 * Intercept management frames to collect beacon rssi data
3902 * and to do ibss merges.
3903 */
static void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int noise, u_int32_t rstamp)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			/* Extend the 15-bit rx stamp to a full 64-bit TSF. */
			u_int64_t tsf = ath_extend_tsf(rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
3955
3956/*
3957 * Set the default antenna.
3958 */
3959static void
3960ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3961{
3962	struct ath_hal *ah = sc->sc_ah;
3963
3964	/* XXX block beacon interrupts */
3965	ath_hal_setdefantenna(ah, antenna);
3966	if (sc->sc_defant != antenna)
3967		sc->sc_stats.ast_ant_defswitch++;
3968	sc->sc_defant = antenna;
3969	sc->sc_rxotherant = 0;
3970}
3971
3972static int
3973ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
3974	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3975{
3976#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
3977#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
3978#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
3979#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
3980	struct ath_softc *sc = ifp->if_softc;
3981	const HAL_RATE_TABLE *rt;
3982	uint8_t rix;
3983
3984	/*
3985	 * Discard anything shorter than an ack or cts.
3986	 */
3987	if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
3988		DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
3989			__func__, m->m_pkthdr.len);
3990		sc->sc_stats.ast_rx_tooshort++;
3991		return 0;
3992	}
3993	rt = sc->sc_currates;
3994	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
3995	rix = rt->rateCodeToIndex[rs->rs_rate];
3996	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3997	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3998#ifdef AH_SUPPORT_AR5416
3999	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
4000	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
4001		struct ieee80211com *ic = ifp->if_l2com;
4002
4003		if ((rs->rs_flags & HAL_RX_2040) == 0)
4004			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
4005		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
4006			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
4007		else
4008			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
4009		if ((rs->rs_flags & HAL_RX_GI) == 0)
4010			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
4011	}
4012#endif
4013	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
4014	if (rs->rs_status & HAL_RXERR_CRC)
4015		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4016	/* XXX propagate other error flags from descriptor */
4017	sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
4018	sc->sc_rx_th.wr_antnoise = nf;
4019	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
4020
4021	bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);
4022
4023	return 1;
4024#undef CHAN_HT
4025#undef CHAN_HT20
4026#undef CHAN_HT40U
4027#undef CHAN_HT40D
4028}
4029
4030static void
4031ath_handle_micerror(struct ieee80211com *ic,
4032	struct ieee80211_frame *wh, int keyix)
4033{
4034	struct ieee80211_node *ni;
4035
4036	/* XXX recheck MIC to deal w/ chips that lie */
4037	/* XXX discard MIC errors on !data frames */
4038	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
4039	if (ni != NULL) {
4040		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
4041		ieee80211_free_node(ni);
4042	}
4043}
4044
/*
 * Receive processing task (scheduled from the rx interrupt).
 *
 * Reap completed frames from the rx descriptor list: account for
 * errors (optionally passing error frames to any bpf taps),
 * reassemble two-descriptor jumbograms via sc_rxpending, and hand
 * good frames up to net80211.  Each processed buffer is re-armed
 * with ath_rxbuf_init and appended back to the rx list.  The final
 * self-linked descriptor is never processed so the hardware always
 * has somewhere to DMA a new frame.
 */
static void
ath_rx_proc(void *arg, int npending)
{
/* map a descriptor's physical address back to its virtual address */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_softc *sc = arg;
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	struct ieee80211_node *ni;
	int len, type, ngood;
	u_int phyerr;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
	ngood = 0;
	/* snapshot noise floor and tsf once for the whole batch */
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
			goto rx_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS)
			break;		/* hardware still owns this descriptor */
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		if (rs->rs_status != 0) {
			/* tally each error class reported by the hal */
			if (rs->rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (rs->rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (rs->rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = rs->rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
				goto rx_error;	/* NB: don't count in ierrors */
			}
			if (rs->rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (rs->rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and qos frames */
				len = rs->rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dmamap_sync(sc->sc_dmat,
					    bf->bf_dmamap,
					    BUS_DMASYNC_POSTREAD);
					ath_handle_micerror(ic,
					    mtod(m, struct ieee80211_frame *),
					    sc->sc_splitmic ?
						rs->rs_keyix-32 : rs->rs_keyix);
				}
			}
			ifp->if_ierrors++;
rx_error:
			/*
			 * Cleanup any pending partial frame.
			 */
			if (sc->sc_rxpending != NULL) {
				m_freem(sc->sc_rxpending);
				sc->sc_rxpending = NULL;
			}
			/*
			 * When a tap is present pass error frames
			 * that have been requested.  By default we
			 * pass decrypt+mic errors but others may be
			 * interesting (e.g. crc).
			 */
			if (bpf_peers_present(ifp->if_bpf) &&
			    (rs->rs_status & sc->sc_monpass)) {
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* NB: bpf needs the mbuf length setup */
				len = rs->rs_datalen;
				m->m_pkthdr.len = m->m_len = len;
				(void) ath_rx_tap(ifp, m, rs, tsf, nf);
			}
			/* XXX pass MIC errors up for s/w recalculation */
			goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the mbuf somewhere so clear
		 * bf_m; this means a new mbuf must be allocated
		 * when the rx descriptor is setup again to receive
		 * another frame.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		bf->bf_m = NULL;

		len = rs->rs_datalen;
		m->m_len = len;

		if (rs->rs_more) {
			/*
			 * Frame spans multiple descriptors; save
			 * it for the next completed descriptor, it
			 * will be used to construct a jumbogram.
			 */
			if (sc->sc_rxpending != NULL) {
				/* NB: max frame size is currently 2 clusters */
				sc->sc_stats.ast_rx_toobig++;
				m_freem(sc->sc_rxpending);
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			sc->sc_rxpending = m;
			goto rx_next;
		} else if (sc->sc_rxpending != NULL) {
			/*
			 * This is the second part of a jumbogram,
			 * chain it to the first mbuf, adjust the
			 * frame length, and clear the rxpending state.
			 */
			sc->sc_rxpending->m_next = m;
			sc->sc_rxpending->m_pkthdr.len += len;
			m = sc->sc_rxpending;
			sc->sc_rxpending = NULL;
		} else {
			/*
			 * Normal single-descriptor receive; setup
			 * the rcvif and packet length.
			 */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
		}

		ifp->if_ipackets++;
		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

		/* dispatch to bpf taps; a runt here is dropped entirely */
		if (bpf_peers_present(ifp->if_bpf) &&
		    !ath_rx_tap(ifp, m, rs, tsf, nf)) {
			m_freem(m);		/* XXX reclaim */
			goto rx_next;
		}

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			m_freem(m);
			goto rx_next;
		}

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
		}

		/* strip the FCS before handing the frame up */
		m_adj(m, -IEEE80211_CRC_LEN);

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.
		 */
		ni = ieee80211_find_rxnode_withkey(ic,
			mtod(m, const struct ieee80211_frame_min *),
			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
				IEEE80211_KEYIX_NONE : rs->rs_keyix);
		if (ni != NULL) {
			/*
			 * Sending station is known, dispatch directly.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			sc->sc_tdmars = rs;
#endif
			type = ieee80211_input(ni, m,
			    rs->rs_rssi, nf, rs->rs_tstamp);
			ieee80211_free_node(ni);
			/*
			 * Arrange to update the last rx timestamp only for
			 * frames from our ap when operating in station mode.
			 * This assumes the rx key is always setup when
			 * associated.
			 */
			if (ic->ic_opmode == IEEE80211_M_STA &&
			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
				ngood++;
		} else {
			type = ieee80211_input_all(ic, m,
			    rs->rs_rssi, nf, rs->rs_tstamp);
		}
		/*
		 * Track rx rssi and do any rx antenna management.
		 */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
		if (sc->sc_diversity) {
			/*
			 * When using fast diversity, change the default rx
			 * antenna if diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != rs->rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, rs->rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}
		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				const HAL_RATE_TABLE *rt = sc->sc_currates;
				ath_led_event(sc,
				    rt->rateCodeToIndex[rs->rs_rate]);
			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, 0);
		}
rx_next:
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);	/* re-arm; stop on mbuf shortage */

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	/* kick the transmit path in case it was waiting for buffers */
	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
	    !IFQ_IS_EMPTY(&ifp->if_snd))
		ath_start(ifp);

#undef PA2DESC
}
4344
4345static void
4346ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4347{
4348	txq->axq_qnum = qnum;
4349	txq->axq_depth = 0;
4350	txq->axq_intrcnt = 0;
4351	txq->axq_link = NULL;
4352	STAILQ_INIT(&txq->axq_q);
4353	ATH_TXQ_LOCK_INIT(sc, txq);
4354	TAILQ_INIT(&txq->axq_stageq);
4355	txq->axq_curage = 0;
4356}
4357
4358/*
4359 * Setup a h/w transmit queue.
4360 */
4361static struct ath_txq *
4362ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4363{
4364#define	N(a)	(sizeof(a)/sizeof(a[0]))
4365	struct ath_hal *ah = sc->sc_ah;
4366	HAL_TXQ_INFO qi;
4367	int qnum;
4368
4369	memset(&qi, 0, sizeof(qi));
4370	qi.tqi_subtype = subtype;
4371	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4372	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4373	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4374	/*
4375	 * Enable interrupts only for EOL and DESC conditions.
4376	 * We mark tx descriptors to receive a DESC interrupt
4377	 * when a tx queue gets deep; otherwise waiting for the
4378	 * EOL to reap descriptors.  Note that this is done to
4379	 * reduce interrupt load and this only defers reaping
4380	 * descriptors, never transmitting frames.  Aside from
4381	 * reducing interrupts this also permits more concurrency.
4382	 * The only potential downside is if the tx queue backs
4383	 * up in which case the top half of the kernel may backup
4384	 * due to a lack of tx descriptors.
4385	 */
4386	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4387	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4388	if (qnum == -1) {
4389		/*
4390		 * NB: don't print a message, this happens
4391		 * normally on parts with too few tx queues
4392		 */
4393		return NULL;
4394	}
4395	if (qnum >= N(sc->sc_txq)) {
4396		device_printf(sc->sc_dev,
4397			"hal qnum %u out of range, max %zu!\n",
4398			qnum, N(sc->sc_txq));
4399		ath_hal_releasetxqueue(ah, qnum);
4400		return NULL;
4401	}
4402	if (!ATH_TXQ_SETUP(sc, qnum)) {
4403		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4404		sc->sc_txqsetup |= 1<<qnum;
4405	}
4406	return &sc->sc_txq[qnum];
4407#undef N
4408}
4409
4410/*
4411 * Setup a hardware data transmit queue for the specified
4412 * access control.  The hal may not support all requested
4413 * queues in which case it will return a reference to a
4414 * previously setup queue.  We record the mapping from ac's
4415 * to h/w queues for use by ath_tx_start and also track
4416 * the set of h/w queues being used to optimize work in the
4417 * transmit interrupt handler and related routines.
4418 */
4419static int
4420ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4421{
4422#define	N(a)	(sizeof(a)/sizeof(a[0]))
4423	struct ath_txq *txq;
4424
4425	if (ac >= N(sc->sc_ac2q)) {
4426		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4427			ac, N(sc->sc_ac2q));
4428		return 0;
4429	}
4430	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4431	if (txq != NULL) {
4432		sc->sc_ac2q[ac] = txq;
4433		return 1;
4434	} else
4435		return 0;
4436#undef N
4437}
4438
4439/*
4440 * Update WME parameters for a transmit queue.
4441 */
4442static int
4443ath_txq_update(struct ath_softc *sc, int ac)
4444{
4445#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
4446#define	ATH_TXOP_TO_US(v)		(v<<5)
4447	struct ifnet *ifp = sc->sc_ifp;
4448	struct ieee80211com *ic = ifp->if_l2com;
4449	struct ath_txq *txq = sc->sc_ac2q[ac];
4450	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4451	struct ath_hal *ah = sc->sc_ah;
4452	HAL_TXQ_INFO qi;
4453
4454	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4455#ifdef IEEE80211_SUPPORT_TDMA
4456	if (sc->sc_tdma) {
4457		/*
4458		 * AIFS is zero so there's no pre-transmit wait.  The
4459		 * burst time defines the slot duration and is configured
4460		 * via sysctl.  The QCU is setup to not do post-xmit
4461		 * back off, lockout all lower-priority QCU's, and fire
4462		 * off the DMA beacon alert timer which is setup based
4463		 * on the slot configuration.
4464		 */
4465		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4466			      | HAL_TXQ_TXERRINT_ENABLE
4467			      | HAL_TXQ_TXURNINT_ENABLE
4468			      | HAL_TXQ_TXEOLINT_ENABLE
4469			      | HAL_TXQ_DBA_GATED
4470			      | HAL_TXQ_BACKOFF_DISABLE
4471			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4472			      ;
4473		qi.tqi_aifs = 0;
4474		/* XXX +dbaprep? */
4475		qi.tqi_readyTime = sc->sc_tdmaslotlen;
4476		qi.tqi_burstTime = qi.tqi_readyTime;
4477	} else {
4478#endif
4479		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4480			      | HAL_TXQ_TXERRINT_ENABLE
4481			      | HAL_TXQ_TXDESCINT_ENABLE
4482			      | HAL_TXQ_TXURNINT_ENABLE
4483			      ;
4484		qi.tqi_aifs = wmep->wmep_aifsn;
4485		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4486		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4487		qi.tqi_readyTime = 0;
4488		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4489#ifdef IEEE80211_SUPPORT_TDMA
4490	}
4491#endif
4492
4493	DPRINTF(sc, ATH_DEBUG_RESET,
4494	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4495	    __func__, txq->axq_qnum, qi.tqi_qflags,
4496	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4497
4498	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4499		if_printf(ifp, "unable to update hardware queue "
4500			"parameters for %s traffic!\n",
4501			ieee80211_wme_acnames[ac]);
4502		return 0;
4503	} else {
4504		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4505		return 1;
4506	}
4507#undef ATH_TXOP_TO_US
4508#undef ATH_EXPONENT_TO_VALUE
4509}
4510
4511/*
4512 * Callback from the 802.11 layer to update WME parameters.
4513 */
4514static int
4515ath_wme_update(struct ieee80211com *ic)
4516{
4517	struct ath_softc *sc = ic->ic_ifp->if_softc;
4518
4519	return !ath_txq_update(sc, WME_AC_BE) ||
4520	    !ath_txq_update(sc, WME_AC_BK) ||
4521	    !ath_txq_update(sc, WME_AC_VI) ||
4522	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4523}
4524
4525/*
4526 * Reclaim resources for a setup queue.
4527 */
4528static void
4529ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4530{
4531
4532	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4533	ATH_TXQ_LOCK_DESTROY(txq);
4534	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4535}
4536
4537/*
4538 * Reclaim all tx queue resources.
4539 */
4540static void
4541ath_tx_cleanup(struct ath_softc *sc)
4542{
4543	int i;
4544
4545	ATH_TXBUF_LOCK_DESTROY(sc);
4546	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4547		if (ATH_TXQ_SETUP(sc, i))
4548			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4549}
4550
4551/*
4552 * Return h/w rate index for an IEEE rate (w/o basic rate bit).
4553 */
4554static int
4555ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
4556{
4557	int i;
4558
4559	for (i = 0; i < rt->rateCount; i++)
4560		if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
4561			return i;
4562	return 0;		/* NB: lowest rate */
4563}
4564
4565/*
4566 * Reclaim mbuf resources.  For fragmented frames we
4567 * need to claim each frag chained with m_nextpkt.
4568 */
4569static void
4570ath_freetx(struct mbuf *m)
4571{
4572	struct mbuf *next;
4573
4574	do {
4575		next = m->m_nextpkt;
4576		m->m_nextpkt = NULL;
4577		m_freem(m);
4578	} while ((m = next) != NULL);
4579}
4580
/*
 * Load the DMA map for an outbound frame, collapsing the mbuf
 * chain to at most ATH_TXDESC segments if necessary.  On success
 * the (possibly replaced) mbuf is stashed in bf->bf_m and the map
 * is synced for a h/w write; on failure the chain is freed and an
 * errno is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;	/* NB: sentinel forces the linearize path below */
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the load with the collapsed chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
4638
/*
 * Complete the descriptor chain for a frame and hand it to the
 * hardware: link the tx descriptors together, append the buffer
 * to the queue, and either write TXDP (empty queue) or patch the
 * link pointer of the previous tail descriptor.  Frames destined
 * for the s/w cab queue (ATH_TXQ_SWQ) are only staged here; they
 * are pushed to the hardware from the SWBA handler.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("busy status 0x%x", bf->bf_flags));
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    txq->axq_depth);
			} else {
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
			}
		} else {
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
					STAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* queue was empty; write TXDP directly */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* chain onto the previous tail descriptor */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
#endif /* IEEE80211_SUPPORT_TDMA */
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		/* s/w cab queue: stage only, SWBA handler pushes to h/w */
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}
4771
4772static int
4773ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
4774    struct mbuf *m0)
4775{
4776	struct ieee80211vap *vap = ni->ni_vap;
4777	struct ath_vap *avp = ATH_VAP(vap);
4778	struct ath_hal *ah = sc->sc_ah;
4779	struct ifnet *ifp = sc->sc_ifp;
4780	struct ieee80211com *ic = ifp->if_l2com;
4781	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
4782	int error, iswep, ismcast, isfrag, ismrr;
4783	int keyix, hdrlen, pktlen, try0;
4784	u_int8_t rix, txrate, ctsrate;
4785	u_int8_t cix = 0xff;		/* NB: silence compiler */
4786	struct ath_desc *ds;
4787	struct ath_txq *txq;
4788	struct ieee80211_frame *wh;
4789	u_int subtype, flags, ctsduration;
4790	HAL_PKT_TYPE atype;
4791	const HAL_RATE_TABLE *rt;
4792	HAL_BOOL shortPreamble;
4793	struct ath_node *an;
4794	u_int pri;
4795
4796	wh = mtod(m0, struct ieee80211_frame *);
4797	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
4798	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4799	isfrag = m0->m_flags & M_FRAG;
4800	hdrlen = ieee80211_anyhdrsize(wh);
4801	/*
4802	 * Packet length must not include any
4803	 * pad bytes; deduct them here.
4804	 */
4805	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
4806
4807	if (iswep) {
4808		const struct ieee80211_cipher *cip;
4809		struct ieee80211_key *k;
4810
4811		/*
4812		 * Construct the 802.11 header+trailer for an encrypted
4813		 * frame. The only reason this can fail is because of an
4814		 * unknown or unsupported cipher/key type.
4815		 */
4816		k = ieee80211_crypto_encap(ni, m0);
4817		if (k == NULL) {
4818			/*
4819			 * This can happen when the key is yanked after the
4820			 * frame was queued.  Just discard the frame; the
4821			 * 802.11 layer counts failures and provides
4822			 * debugging/diagnostics.
4823			 */
4824			ath_freetx(m0);
4825			return EIO;
4826		}
4827		/*
4828		 * Adjust the packet + header lengths for the crypto
4829		 * additions and calculate the h/w key index.  When
4830		 * a s/w mic is done the frame will have had any mic
4831		 * added to it prior to entry so m0->m_pkthdr.len will
4832		 * account for it. Otherwise we need to add it to the
4833		 * packet length.
4834		 */
4835		cip = k->wk_cipher;
4836		hdrlen += cip->ic_header;
4837		pktlen += cip->ic_header + cip->ic_trailer;
4838		/* NB: frags always have any TKIP MIC done in s/w */
4839		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
4840			pktlen += cip->ic_miclen;
4841		keyix = k->wk_keyix;
4842
4843		/* packet header may have moved, reset our local pointer */
4844		wh = mtod(m0, struct ieee80211_frame *);
4845	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
4846		/*
4847		 * Use station key cache slot, if assigned.
4848		 */
4849		keyix = ni->ni_ucastkey.wk_keyix;
4850		if (keyix == IEEE80211_KEYIX_NONE)
4851			keyix = HAL_TXKEYIX_INVALID;
4852	} else
4853		keyix = HAL_TXKEYIX_INVALID;
4854
4855	pktlen += IEEE80211_CRC_LEN;
4856
4857	/*
4858	 * Load the DMA map so any coalescing is done.  This
4859	 * also calculates the number of descriptors we need.
4860	 */
4861	error = ath_tx_dmasetup(sc, bf, m0);
4862	if (error != 0)
4863		return error;
4864	bf->bf_node = ni;			/* NB: held reference */
4865	m0 = bf->bf_m;				/* NB: may have changed */
4866	wh = mtod(m0, struct ieee80211_frame *);
4867
4868	/* setup descriptors */
4869	ds = bf->bf_desc;
4870	rt = sc->sc_currates;
4871	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
4872
4873	/*
4874	 * NB: the 802.11 layer marks whether or not we should
4875	 * use short preamble based on the current mode and
4876	 * negotiated parameters.
4877	 */
4878	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
4879	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
4880		shortPreamble = AH_TRUE;
4881		sc->sc_stats.ast_tx_shortpre++;
4882	} else {
4883		shortPreamble = AH_FALSE;
4884	}
4885
4886	an = ATH_NODE(ni);
4887	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
4888	ismrr = 0;				/* default no multi-rate retry*/
4889	pri = M_WME_GETAC(m0);			/* honor classification */
4890	/* XXX use txparams instead of fixed values */
4891	/*
4892	 * Calculate Atheros packet type from IEEE80211 packet header,
4893	 * setup for rate calculations, and select h/w transmit queue.
4894	 */
4895	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
4896	case IEEE80211_FC0_TYPE_MGT:
4897		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4898		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
4899			atype = HAL_PKT_TYPE_BEACON;
4900		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4901			atype = HAL_PKT_TYPE_PROBE_RESP;
4902		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
4903			atype = HAL_PKT_TYPE_ATIM;
4904		else
4905			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
4906		rix = an->an_mgmtrix;
4907		txrate = rt->info[rix].rateCode;
4908		if (shortPreamble)
4909			txrate |= rt->info[rix].shortPreamble;
4910		try0 = ATH_TXMGTTRY;
4911		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4912		break;
4913	case IEEE80211_FC0_TYPE_CTL:
4914		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
4915		rix = an->an_mgmtrix;
4916		txrate = rt->info[rix].rateCode;
4917		if (shortPreamble)
4918			txrate |= rt->info[rix].shortPreamble;
4919		try0 = ATH_TXMGTTRY;
4920		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4921		break;
4922	case IEEE80211_FC0_TYPE_DATA:
4923		atype = HAL_PKT_TYPE_NORMAL;		/* default */
4924		/*
4925		 * Data frames: multicast frames go out at a fixed rate,
4926		 * EAPOL frames use the mgmt frame rate; otherwise consult
4927		 * the rate control module for the rate to use.
4928		 */
4929		if (ismcast) {
4930			rix = an->an_mcastrix;
4931			txrate = rt->info[rix].rateCode;
4932			if (shortPreamble)
4933				txrate |= rt->info[rix].shortPreamble;
4934			try0 = 1;
4935		} else if (m0->m_flags & M_EAPOL) {
4936			/* XXX? maybe always use long preamble? */
4937			rix = an->an_mgmtrix;
4938			txrate = rt->info[rix].rateCode;
4939			if (shortPreamble)
4940				txrate |= rt->info[rix].shortPreamble;
4941			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
4942		} else {
4943			ath_rate_findrate(sc, an, shortPreamble, pktlen,
4944				&rix, &try0, &txrate);
4945			sc->sc_txrix = rix;		/* for LED blinking */
4946			sc->sc_lastdatarix = rix;	/* for fast frames */
4947			if (try0 != ATH_TXMAXTRY)
4948				ismrr = 1;
4949		}
4950		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
4951			flags |= HAL_TXDESC_NOACK;
4952		break;
4953	default:
4954		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
4955			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
4956		/* XXX statistic */
4957		ath_freetx(m0);
4958		return EIO;
4959	}
4960	txq = sc->sc_ac2q[pri];
4961
4962	/*
4963	 * When servicing one or more stations in power-save mode
4964	 * (or) if there is some mcast data waiting on the mcast
4965	 * queue (to prevent out of order delivery) multicast
4966	 * frames must be buffered until after the beacon.
4967	 */
4968	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
4969		txq = &avp->av_mcastq;
4970
4971	/*
4972	 * Calculate miscellaneous flags.
4973	 */
4974	if (ismcast) {
4975		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
4976	} else if (pktlen > vap->iv_rtsthreshold &&
4977	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
4978		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
4979		cix = rt->info[rix].controlRate;
4980		sc->sc_stats.ast_tx_rts++;
4981	}
4982	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
4983		sc->sc_stats.ast_tx_noack++;
4984#ifdef IEEE80211_SUPPORT_TDMA
4985	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
4986		DPRINTF(sc, ATH_DEBUG_TDMA,
4987		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
4988		sc->sc_stats.ast_tdma_ack++;
4989		ath_freetx(m0);
4990		return EIO;
4991	}
4992#endif
4993
4994	/*
4995	 * If 802.11g protection is enabled, determine whether
4996	 * to use RTS/CTS or just CTS.  Note that this is only
4997	 * done for OFDM unicast frames.
4998	 */
4999	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
5000	    rt->info[rix].phy == IEEE80211_T_OFDM &&
5001	    (flags & HAL_TXDESC_NOACK) == 0) {
5002		/* XXX fragments must use CCK rates w/ protection */
5003		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
5004			flags |= HAL_TXDESC_RTSENA;
5005		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5006			flags |= HAL_TXDESC_CTSENA;
5007		if (isfrag) {
5008			/*
5009			 * For frags it would be desirable to use the
5010			 * highest CCK rate for RTS/CTS.  But stations
5011			 * farther away may detect it at a lower CCK rate
5012			 * so use the configured protection rate instead
5013			 * (for now).
5014			 */
5015			cix = rt->info[sc->sc_protrix].controlRate;
5016		} else
5017			cix = rt->info[sc->sc_protrix].controlRate;
5018		sc->sc_stats.ast_tx_protect++;
5019	}
5020
5021	/*
5022	 * Calculate duration.  This logically belongs in the 802.11
5023	 * layer but it lacks sufficient information to calculate it.
5024	 */
5025	if ((flags & HAL_TXDESC_NOACK) == 0 &&
5026	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
5027		u_int16_t dur;
5028		if (shortPreamble)
5029			dur = rt->info[rix].spAckDuration;
5030		else
5031			dur = rt->info[rix].lpAckDuration;
5032		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
5033			dur += dur;		/* additional SIFS+ACK */
5034			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
5035			/*
5036			 * Include the size of next fragment so NAV is
5037			 * updated properly.  The last fragment uses only
5038			 * the ACK duration
5039			 */
5040			dur += ath_hal_computetxtime(ah, rt,
5041					m0->m_nextpkt->m_pkthdr.len,
5042					rix, shortPreamble);
5043		}
5044		if (isfrag) {
5045			/*
5046			 * Force hardware to use computed duration for next
5047			 * fragment by disabling multi-rate retry which updates
5048			 * duration based on the multi-rate duration table.
5049			 */
5050			ismrr = 0;
5051			try0 = ATH_TXMGTTRY;	/* XXX? */
5052		}
5053		*(u_int16_t *)wh->i_dur = htole16(dur);
5054	}
5055
5056	/*
5057	 * Calculate RTS/CTS rate and duration if needed.
5058	 */
5059	ctsduration = 0;
5060	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
5061		/*
5062		 * CTS transmit rate is derived from the transmit rate
5063		 * by looking in the h/w rate table.  We must also factor
5064		 * in whether or not a short preamble is to be used.
5065		 */
5066		/* NB: cix is set above where RTS/CTS is enabled */
5067		KASSERT(cix != 0xff, ("cix not setup"));
5068		ctsrate = rt->info[cix].rateCode;
5069		/*
5070		 * Compute the transmit duration based on the frame
5071		 * size and the size of an ACK frame.  We call into the
5072		 * HAL to do the computation since it depends on the
5073		 * characteristics of the actual PHY being used.
5074		 *
5075		 * NB: CTS is assumed the same size as an ACK so we can
5076		 *     use the precalculated ACK durations.
5077		 */
5078		if (shortPreamble) {
5079			ctsrate |= rt->info[cix].shortPreamble;
5080			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
5081				ctsduration += rt->info[cix].spAckDuration;
5082			ctsduration += ath_hal_computetxtime(ah,
5083				rt, pktlen, rix, AH_TRUE);
5084			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
5085				ctsduration += rt->info[rix].spAckDuration;
5086		} else {
5087			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
5088				ctsduration += rt->info[cix].lpAckDuration;
5089			ctsduration += ath_hal_computetxtime(ah,
5090				rt, pktlen, rix, AH_FALSE);
5091			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
5092				ctsduration += rt->info[rix].lpAckDuration;
5093		}
5094		/*
5095		 * Must disable multi-rate retry when using RTS/CTS.
5096		 */
5097		ismrr = 0;
5098		try0 = ATH_TXMGTTRY;		/* XXX */
5099	} else
5100		ctsrate = 0;
5101
5102	/*
5103	 * At this point we are committed to sending the frame
5104	 * and we don't need to look at m_nextpkt; clear it in
5105	 * case this frame is part of frag chain.
5106	 */
5107	m0->m_nextpkt = NULL;
5108
5109	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
5110		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
5111			sc->sc_hwmap[rix].ieeerate, -1);
5112
5113	if (bpf_peers_present(ifp->if_bpf)) {
5114		u_int64_t tsf = ath_hal_gettsf64(ah);
5115
5116		sc->sc_tx_th.wt_tsf = htole64(tsf);
5117		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
5118		if (iswep)
5119			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5120		if (isfrag)
5121			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
5122		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
5123		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
5124		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
5125
5126		bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
5127	}
5128
5129	/*
5130	 * Determine if a tx interrupt should be generated for
5131	 * this descriptor.  We take a tx interrupt to reap
5132	 * descriptors when the h/w hits an EOL condition or
5133	 * when the descriptor is specifically marked to generate
5134	 * an interrupt.  We periodically mark descriptors in this
5135	 * way to insure timely replenishing of the supply needed
5136	 * for sending frames.  Defering interrupts reduces system
5137	 * load and potentially allows more concurrent work to be
5138	 * done but if done to aggressively can cause senders to
5139	 * backup.
5140	 *
5141	 * NB: use >= to deal with sc_txintrperiod changing
5142	 *     dynamically through sysctl.
5143	 */
5144	if (flags & HAL_TXDESC_INTREQ) {
5145		txq->axq_intrcnt = 0;
5146	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
5147		flags |= HAL_TXDESC_INTREQ;
5148		txq->axq_intrcnt = 0;
5149	}
5150
5151	/*
5152	 * Formulate first tx descriptor with tx controls.
5153	 */
5154	/* XXX check return value? */
5155	ath_hal_setuptxdesc(ah, ds
5156		, pktlen		/* packet length */
5157		, hdrlen		/* header length */
5158		, atype			/* Atheros packet type */
5159		, ni->ni_txpower	/* txpower */
5160		, txrate, try0		/* series 0 rate/tries */
5161		, keyix			/* key cache index */
5162		, sc->sc_txantenna	/* antenna mode */
5163		, flags			/* flags */
5164		, ctsrate		/* rts/cts rate */
5165		, ctsduration		/* rts/cts duration */
5166	);
5167	bf->bf_txflags = flags;
5168	/*
5169	 * Setup the multi-rate retry state only when we're
5170	 * going to use it.  This assumes ath_hal_setuptxdesc
5171	 * initializes the descriptors (so we don't have to)
5172	 * when the hardware supports multi-rate retry and
5173	 * we don't use it.
5174	 */
5175	if (ismrr)
5176		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
5177
5178	ath_tx_handoff(sc, txq, bf);
5179	return 0;
5180}
5181
5182/*
5183 * Process completed xmit descriptors from the specified queue.
5184 */
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_buf *bf, *last;
	struct ath_desc *ds, *ds0;
	struct ath_tx_status *ts;
	struct ieee80211_node *ni;
	struct ath_node *an;
	int sr, lr, pri, nacked;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
		__func__, txq->axq_qnum,
		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);
	/* nacked counts frames positively ack'd; returned to the caller */
	nacked = 0;
	for (;;) {
		ATH_TXQ_LOCK(txq);
		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		/* tx status is written into the frame's last descriptor */
		ds0 = &bf->bf_desc[0];
		ds = &bf->bf_desc[bf->bf_nseg - 1];
		ts = &bf->bf_status.ds_txstat;
		status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
			    status == HAL_OK);
#endif
		/* hardware hasn't finished this frame; stop reaping */
		if (status == HAL_EINPROGRESS) {
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
#ifdef IEEE80211_SUPPORT_TDMA
		if (txq->axq_depth > 0) {
			/*
			 * More frames follow.  Mark the buffer busy
			 * so it's not re-used while the hardware may
			 * still re-read the link field in the descriptor.
			 */
			bf->bf_flags |= ATH_BUF_BUSY;
		} else
#else
		if (txq->axq_depth == 0)
#endif
			txq->axq_link = NULL;
		ATH_TXQ_UNLOCK(txq);

		ni = bf->bf_node;
		if (ni != NULL) {
			an = ATH_NODE(ni);
			if (ts->ts_status == 0) {
				/* frame transmitted ok: update antenna/rate stats */
				u_int8_t txant = ts->ts_antenna;
				sc->sc_stats.ast_ant_tx[txant]++;
				sc->sc_ant_tx[txant]++;
				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
					sc->sc_stats.ast_tx_altrate++;
				pri = M_WME_GETAC(bf->bf_m);
				if (pri >= WME_AC_VO)
					ic->ic_wme.wme_hipri_traffic++;
				/* an ack'd frame shows the station is alive */
				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
					ni->ni_inact = ni->ni_inact_reload;
			} else {
				/* tx failed: bump per-error-cause counters */
				if (ts->ts_status & HAL_TXERR_XRETRY)
					sc->sc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					sc->sc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					sc->sc_stats.ast_tx_filtered++;
				if (bf->bf_m->m_flags & M_FF)
					sc->sc_stats.ast_ff_txerr++;
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update statistics,
				 * including the last rx time used to
				 * workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
						ts->ts_rssi);
				}
				ath_rate_tx_complete(sc, an, bf);
			}
			/*
			 * Do any tx complete callback.  Note this must
			 * be done before releasing the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m,
				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
				        ts->ts_status : HAL_TXERR_XRETRY);
			/*
			 * Reclaim reference to node.
			 *
			 * NB: the node may be reclaimed here if, for example
			 *     this is a DEAUTH message that was sent and the
			 *     node was timed out due to inactivity.
			 */
			ieee80211_free_node(ni);
		}
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_node = NULL;

		/*
		 * Recycle the buffer: the previous free-list tail can
		 * drop its busy mark now that the hardware is known to
		 * have advanced past it.
		 */
		ATH_TXBUF_LOCK(sc);
		last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
		if (last != NULL)
			last->bf_flags &= ~ATH_BUF_BUSY;
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
	/*
	 * Flush fast-frame staging queue when traffic slows.
	 */
	if (txq->axq_depth <= 1)
		ath_ff_stageq_flush(sc, txq, ath_ff_always);
	return nacked;
}
5326
5327static __inline int
5328txqactive(struct ath_hal *ah, int qnum)
5329{
5330	u_int32_t txqs = 1<<qnum;
5331	ath_hal_gettxintrtxqs(ah, &txqs);
5332	return (txqs & (1<<qnum));
5333}
5334
5335/*
5336 * Deferred processing of transmit interrupt; special-cased
5337 * for a single hardware transmit queue (e.g. 5210 and 5211).
5338 */
5339static void
5340ath_tx_proc_q0(void *arg, int npending)
5341{
5342	struct ath_softc *sc = arg;
5343	struct ifnet *ifp = sc->sc_ifp;
5344
5345	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
5346		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5347	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5348		ath_tx_processq(sc, sc->sc_cabq);
5349	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5350	sc->sc_wd_timer = 0;
5351
5352	if (sc->sc_softled)
5353		ath_led_event(sc, sc->sc_txrix);
5354
5355	ath_start(ifp);
5356}
5357
5358/*
5359 * Deferred processing of transmit interrupt; special-cased
5360 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
5361 */
5362static void
5363ath_tx_proc_q0123(void *arg, int npending)
5364{
5365	struct ath_softc *sc = arg;
5366	struct ifnet *ifp = sc->sc_ifp;
5367	int nacked;
5368
5369	/*
5370	 * Process each active queue.
5371	 */
5372	nacked = 0;
5373	if (txqactive(sc->sc_ah, 0))
5374		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
5375	if (txqactive(sc->sc_ah, 1))
5376		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
5377	if (txqactive(sc->sc_ah, 2))
5378		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
5379	if (txqactive(sc->sc_ah, 3))
5380		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
5381	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5382		ath_tx_processq(sc, sc->sc_cabq);
5383	if (nacked)
5384		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5385
5386	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5387	sc->sc_wd_timer = 0;
5388
5389	if (sc->sc_softled)
5390		ath_led_event(sc, sc->sc_txrix);
5391
5392	ath_start(ifp);
5393}
5394
5395/*
5396 * Deferred processing of transmit interrupt.
5397 */
5398static void
5399ath_tx_proc(void *arg, int npending)
5400{
5401	struct ath_softc *sc = arg;
5402	struct ifnet *ifp = sc->sc_ifp;
5403	int i, nacked;
5404
5405	/*
5406	 * Process each active queue.
5407	 */
5408	nacked = 0;
5409	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5410		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
5411			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
5412	if (nacked)
5413		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5414
5415	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5416	sc->sc_wd_timer = 0;
5417
5418	if (sc->sc_softled)
5419		ath_led_event(sc, sc->sc_txrix);
5420
5421	ath_start(ifp);
5422}
5423
/*
 * Reclaim all tx buffers pending on a queue: unload DMA maps, run
 * any tx completion callbacks (with status -1, i.e. "aborted"),
 * release node references and return the buffers to the free list.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
	struct ath_hal *ah = sc->sc_ah;
#endif
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	u_int ix;

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_proc
	 */
	/* clear the busy mark on the free-list tail; dma is stopped */
	ATH_TXBUF_LOCK(sc);
	bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
	if (bf != NULL)
		bf->bf_flags &= ~ATH_BUF_BUSY;
	ATH_TXBUF_UNLOCK(sc);
	for (ix = 0;; ix++) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			/* queue empty; nothing more for the h/w to follow */
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RESET) {
			struct ieee80211com *ic = sc->sc_ifp->if_l2com;

			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ic, mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
#endif /* ATH_DEBUG */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Do any callback and reclaim the node reference.
			 */
			if (bf->bf_m->m_flags & M_TXCB)
				ieee80211_process_callback(ni, bf->bf_m, -1);
			ieee80211_free_node(ni);
		}
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
		bf->bf_flags &= ~ATH_BUF_BUSY;

		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
}
5484
5485static void
5486ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5487{
5488	struct ath_hal *ah = sc->sc_ah;
5489
5490	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5491	    __func__, txq->axq_qnum,
5492	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5493	    txq->axq_link);
5494	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5495}
5496
5497/*
5498 * Drain the transmit queues and reclaim resources.
5499 */
static void
ath_draintxq(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	/* XXX return value */
	if (!sc->sc_invalid) {
		/* don't touch the hardware if marked invalid */
		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
		    __func__, sc->sc_bhalq,
		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
		    NULL);
		/* halt DMA on the beacon queue, then on each data queue */
		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
	}
	/* with DMA halted, reclaim buffers queued on each data queue */
	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i]);
#ifdef ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET) {
		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
		if (bf != NULL && bf->bf_m != NULL) {
			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
				ath_hal_txprocdesc(ah, bf->bf_desc,
				    &bf->bf_status.ds_txstat) == HAL_OK);
			ieee80211_dump_pkt(ifp->if_l2com, mtod(bf->bf_m, caddr_t),
				bf->bf_m->m_len, 0, -1);
		}
	}
#endif /* ATH_DEBUG */
	/* output has been drained; clear flow control and the watchdog */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_wd_timer = 0;
}
5537
5538/*
5539 * Disable the receive h/w in preparation for a reset.
5540 */
static void
ath_stoprecv(struct ath_softc *sc)
{
/* map a descriptor's physical address back to its virtual address */
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_stoppcurecv(ah);	/* disable PCU */
	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
	DELAY(3000);			/* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
		struct ath_buf *bf;
		u_int ix;

		/* dump the state of each rx descriptor for post-mortem */
		printf("%s: rx queue %p, link %p\n", __func__,
			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
		ix = 0;
		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
			struct ath_desc *ds = bf->bf_desc;
			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
			ix++;
		}
	}
#endif
	/* discard any partially received frame held for reassembly */
	if (sc->sc_rxpending != NULL) {
		m_freem(sc->sc_rxpending);
		sc->sc_rxpending = NULL;
	}
	sc->sc_rxlink = NULL;		/* just in case */
#undef PA2DESC
}
5579
5580/*
5581 * Enable the receive h/w following a reset.
5582 */
5583static int
5584ath_startrecv(struct ath_softc *sc)
5585{
5586	struct ath_hal *ah = sc->sc_ah;
5587	struct ath_buf *bf;
5588
5589	sc->sc_rxlink = NULL;
5590	sc->sc_rxpending = NULL;
5591	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5592		int error = ath_rxbuf_init(sc, bf);
5593		if (error != 0) {
5594			DPRINTF(sc, ATH_DEBUG_RECV,
5595				"%s: ath_rxbuf_init failed %d\n",
5596				__func__, error);
5597			return error;
5598		}
5599	}
5600
5601	bf = STAILQ_FIRST(&sc->sc_rxbuf);
5602	ath_hal_putrxbuf(ah, bf->bf_daddr);
5603	ath_hal_rxena(ah);		/* enable recv descriptors */
5604	ath_mode_init(sc);		/* set filters, etc. */
5605	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
5606	return 0;
5607}
5608
5609/*
5610 * Update internal state after a channel change.
5611 */
5612static void
5613ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5614{
5615	enum ieee80211_phymode mode;
5616
5617	/*
5618	 * Change channels and update the h/w rate map
5619	 * if we're switching; e.g. 11a to 11b/g.
5620	 */
5621	mode = ieee80211_chan2mode(chan);
5622	if (mode != sc->sc_curmode)
5623		ath_setcurmode(sc, mode);
5624	sc->sc_curchan = chan;
5625
5626	sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags);
5627	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags;
5628	sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
5629	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq;
5630	sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee;
5631	sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee;
5632	sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower;
5633	sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow;
5634}
5635
5636/*
5637 * Set/change channels.  If the channel is really being changed,
5638 * it's done by reseting the chip.  To accomplish this we must
5639 * first cleanup any pending DMA, then restart stuff after a la
5640 * ath_init.
5641 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
	    __func__, ieee80211_chan2ieee(ic, chan),
	    chan->ic_freq, chan->ic_flags);
	if (chan != sc->sc_curchan) {
		HAL_STATUS status;
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		/* NB: the EIO error returns below leave interrupts disabled */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* clear pending tx frames */
		ath_stoprecv(sc);		/* turn off frame recv */
		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
			if_printf(ifp, "%s: unable to reset "
			    "channel %u (%u Mhz, flags 0x%x), hal status %u\n",
			    __func__, ieee80211_chan2ieee(ic, chan),
			    chan->ic_freq, chan->ic_flags, status);
			return EIO;
		}
		/* the reset may change the diversity setting; re-cache it */
		sc->sc_diversity = ath_hal_getdiversity(ah);

		/*
		 * Re-enable rx framework.
		 */
		if (ath_startrecv(sc) != 0) {
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
			return EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_chan_change(sc, chan);

		/*
		 * Re-enable interrupts.
		 */
		ath_hal_intrset(ah, sc->sc_imask);
	}
	return 0;
}
5694
5695/*
5696 * Periodically recalibrate the PHY to account
5697 * for temperature/environment changes.
5698 */
static void
ath_calibrate(void *arg)
{
	struct ath_softc *sc = arg;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	HAL_BOOL longCal, isCalDone;
	int nextcal;	/* ticks until next calibration; 0 disables rearm */

	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
		goto restart;
	/* "long" cal work runs on a longer period than the short cal */
	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
	if (longCal) {
		sc->sc_stats.ast_per_cal++;
		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
				"%s: rfgain change\n", __func__);
			sc->sc_stats.ast_per_rfgain++;
			ath_reset(ifp);
		}
		/*
		 * If this long cal is after an idle period, then
		 * reset the data collection state so we start fresh.
		 */
		if (sc->sc_resetcal) {
			(void) ath_hal_calreset(ah, sc->sc_curchan);
			sc->sc_lastcalreset = ticks;
			sc->sc_resetcal = 0;
		}
	}
	if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
		if (longCal) {
			/*
			 * Calibrate noise floor data again in case of change.
			 */
			ath_hal_process_noisefloor(ah);
		}
	} else {
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: calibration of channel %u failed\n",
			__func__, sc->sc_curchan->ic_freq);
		sc->sc_stats.ast_per_calfail++;
	}
	if (!isCalDone) {
restart:
		/*
		 * Use a shorter interval to potentially collect multiple
		 * data samples required to complete calibration.  Once
		 * we're told the work is done we drop back to a longer
		 * interval between requests.  We're more aggressive doing
		 * work when operating as an AP to improve operation right
		 * after startup.
		 */
		/*
		 * NOTE(review): when entered via the 'restart' goto,
		 * isCalDone has not been set yet, so the DPRINTF below
		 * may read an indeterminate value -- confirm/fix upstream.
		 */
		nextcal = (1000*ath_shortcalinterval)/hz;
		if (sc->sc_opmode != HAL_M_HOSTAP)
			nextcal *= 10;
	} else {
		/* calibration complete; fall back to the long interval */
		nextcal = ath_longcalinterval*hz;
		sc->sc_lastlongcal = ticks;
		if (sc->sc_lastcalreset == 0)
			sc->sc_lastcalreset = sc->sc_lastlongcal;
		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
			sc->sc_resetcal = 1;	/* setup reset next trip */
	}

	if (nextcal != 0) {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
		    __func__, nextcal, isCalDone ? "" : "!");
		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
	} else {
		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
		    __func__);
		/* NB: don't rearm timer */
	}
}
5779
5780static void
5781ath_scan_start(struct ieee80211com *ic)
5782{
5783	struct ifnet *ifp = ic->ic_ifp;
5784	struct ath_softc *sc = ifp->if_softc;
5785	struct ath_hal *ah = sc->sc_ah;
5786	u_int32_t rfilt;
5787
5788	/* XXX calibration timer? */
5789
5790	sc->sc_scanning = 1;
5791	sc->sc_syncbeacon = 0;
5792	rfilt = ath_calcrxfilter(sc);
5793	ath_hal_setrxfilter(ah, rfilt);
5794	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5795
5796	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5797		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5798}
5799
5800static void
5801ath_scan_end(struct ieee80211com *ic)
5802{
5803	struct ifnet *ifp = ic->ic_ifp;
5804	struct ath_softc *sc = ifp->if_softc;
5805	struct ath_hal *ah = sc->sc_ah;
5806	u_int32_t rfilt;
5807
5808	sc->sc_scanning = 0;
5809	rfilt = ath_calcrxfilter(sc);
5810	ath_hal_setrxfilter(ah, rfilt);
5811	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5812
5813	ath_hal_process_noisefloor(ah);
5814
5815	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5816		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5817		 sc->sc_curaid);
5818}
5819
5820static void
5821ath_set_channel(struct ieee80211com *ic)
5822{
5823	struct ifnet *ifp = ic->ic_ifp;
5824	struct ath_softc *sc = ifp->if_softc;
5825
5826	(void) ath_chan_set(sc, ic->ic_curchan);
5827	/*
5828	 * If we are returning to our bss channel then mark state
5829	 * so the next recv'd beacon's tsf will be used to sync the
5830	 * beacon timers.  Note that since we only hear beacons in
5831	 * sta/ibss mode this has no effect in other operating modes.
5832	 */
5833	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5834		sc->sc_syncbeacon = 1;
5835}
5836
5837/*
5838 * Walk the vap list and check if there any vap's in RUN state.
5839 */
5840static int
5841ath_isanyrunningvaps(struct ieee80211vap *this)
5842{
5843	struct ieee80211com *ic = this->iv_ic;
5844	struct ieee80211vap *vap;
5845
5846	IEEE80211_LOCK_ASSERT(ic);
5847
5848	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5849		if (vap != this && vap->iv_state == IEEE80211_S_RUN)
5850			return 1;
5851	}
5852	return 0;
5853}
5854
/*
 * net80211 state-change hook, wrapped around the net80211 default
 * handler (avp->av_newstate).  Handles driver-side work for state
 * transitions: LED state, RX filter and bssid/aid programming,
 * beacon setup/teardown, calibration timer, and task queue
 * blocking/unblocking.  Returns 0 or an error from the wrapped
 * net80211 handler / beacon allocation.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	/* LED state indexed by ieee80211_state */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* stop periodic calibration; restarted below if entering RUN */
	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = vap->iv_bss;
	rfilt = ath_calcrxfilter(sc);
	/* station-side modes track the bss station's association state */
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	/* NB: also reached on success with error == 0 */
	return error;
}
6046
6047/*
6048 * Allocate a key cache slot to the station so we can
6049 * setup a mapping from key index to node. The key cache
6050 * slot is needed for managing antenna state and for
6051 * compression when stations do not use crypto.  We do
6052 * it uniliaterally here; if crypto is employed this slot
6053 * will be reassigned.
6054 */
6055static void
6056ath_setup_stationkey(struct ieee80211_node *ni)
6057{
6058	struct ieee80211vap *vap = ni->ni_vap;
6059	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6060	ieee80211_keyix keyix, rxkeyix;
6061
6062	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
6063		/*
6064		 * Key cache is full; we'll fall back to doing
6065		 * the more expensive lookup in software.  Note
6066		 * this also means no h/w compression.
6067		 */
6068		/* XXX msg+statistic */
6069	} else {
6070		/* XXX locking? */
6071		ni->ni_ucastkey.wk_keyix = keyix;
6072		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
6073		/* NB: must mark device key to get called back on delete */
6074		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
6075		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
6076		/* NB: this will create a pass-thru key entry */
6077		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
6078	}
6079}
6080
6081/*
6082 * Setup driver-specific state for a newly associated node.
6083 * Note that we're called also on a re-associate, the isnew
6084 * param tells us if this is the first time or not.
6085 */
6086static void
6087ath_newassoc(struct ieee80211_node *ni, int isnew)
6088{
6089	struct ath_node *an = ATH_NODE(ni);
6090	struct ieee80211vap *vap = ni->ni_vap;
6091	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6092	const struct ieee80211_txparam *tp = ni->ni_txparms;
6093
6094	an->an_mcastrix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
6095	an->an_mgmtrix = ath_tx_findrix(sc->sc_currates, tp->mgmtrate);
6096
6097	ath_rate_newassoc(sc, an, isnew);
6098	if (isnew &&
6099	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6100	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6101		ath_setup_stationkey(ni);
6102}
6103
6104static int
6105ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6106	int nchans, struct ieee80211_channel chans[])
6107{
6108	struct ath_softc *sc = ic->ic_ifp->if_softc;
6109	struct ath_hal *ah = sc->sc_ah;
6110	HAL_STATUS status;
6111
6112	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6113	    "%s: rd %u cc %u location %c%s\n",
6114	    __func__, reg->regdomain, reg->country, reg->location,
6115	    reg->ecm ? " ecm" : "");
6116
6117	status = ath_hal_set_channels(ah, chans, nchans,
6118	    reg->country, reg->regdomain);
6119	if (status != HAL_OK) {
6120		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6121		    __func__, status);
6122		return EINVAL;		/* XXX */
6123	}
6124	return 0;
6125}
6126
6127static void
6128ath_getradiocaps(struct ieee80211com *ic,
6129	int maxchans, int *nchans, struct ieee80211_channel chans[])
6130{
6131	struct ath_softc *sc = ic->ic_ifp->if_softc;
6132	struct ath_hal *ah = sc->sc_ah;
6133
6134	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
6135	    __func__, SKU_DEBUG, CTRY_DEFAULT);
6136
6137	/* XXX check return */
6138	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
6139	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
6140
6141}
6142
/*
 * Populate the net80211 channel list from the EEPROM contents and
 * record the EEPROM regulatory domain/country code in both the
 * softc and the net80211 regdomain state.  Returns 0 on success
 * or EINVAL if the HAL cannot produce a channel list.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	/* default to ecm enabled, indoor ('I') location */
	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}
6180
/*
 * Callout handler: the LED blink sequence has completed; clear
 * the flag so ath_led_event may start a new blink.
 */
static void
ath_led_done(void *arg)
{
	struct ath_softc *sc = arg;

	sc->sc_blinking = 0;
}
6188
6189/*
6190 * Turn the LED off: flip the pin and then set a timer so no
6191 * update will happen for the specified duration.
6192 */
6193static void
6194ath_led_off(void *arg)
6195{
6196	struct ath_softc *sc = arg;
6197
6198	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
6199	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
6200}
6201
6202/*
6203 * Blink the LED according to the specified on/off times.
6204 */
6205static void
6206ath_led_blink(struct ath_softc *sc, int on, int off)
6207{
6208	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
6209	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
6210	sc->sc_blinking = 1;
6211	sc->sc_ledoff = off;
6212	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
6213}
6214
6215static void
6216ath_led_event(struct ath_softc *sc, int rix)
6217{
6218	sc->sc_ledevent = ticks;	/* time of last event */
6219	if (sc->sc_blinking)		/* don't interrupt active blink */
6220		return;
6221	ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
6222}
6223
6224static int
6225ath_rate_setup(struct ath_softc *sc, u_int mode)
6226{
6227	struct ath_hal *ah = sc->sc_ah;
6228	const HAL_RATE_TABLE *rt;
6229
6230	switch (mode) {
6231	case IEEE80211_MODE_11A:
6232		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6233		break;
6234	case IEEE80211_MODE_HALF:
6235		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6236		break;
6237	case IEEE80211_MODE_QUARTER:
6238		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6239		break;
6240	case IEEE80211_MODE_11B:
6241		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6242		break;
6243	case IEEE80211_MODE_11G:
6244		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6245		break;
6246	case IEEE80211_MODE_TURBO_A:
6247		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6248		break;
6249	case IEEE80211_MODE_TURBO_G:
6250		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6251		break;
6252	case IEEE80211_MODE_STURBO_A:
6253		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6254		break;
6255	case IEEE80211_MODE_11NA:
6256		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6257		break;
6258	case IEEE80211_MODE_11NG:
6259		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6260		break;
6261	default:
6262		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6263			__func__, mode);
6264		return 0;
6265	}
6266	sc->sc_rates[mode] = rt;
6267	return (rt != NULL);
6268}
6269
/*
 * Switch the current phy mode: rebuild the rate-index map
 * (802.11 rate -> h/w rate index), the per-rate hwmap (radiotap
 * flags and LED blink times), and cache the current rate table
 * and the protection-frame rate index.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* build the 802.11 rate -> h/w rate index map */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			/* HT (MCS) rates are keyed with the MCS bit set */
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		if (i >= rt->rateCount) {
			/* unused slots get the slowest blink times */
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/* NB: receive frames include FCS */
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
			IEEE80211_RADIOTAP_F_FCS;
		/* setup blink rate table to avoid per-packet lookup */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(rt, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(rt, 2*1);
	/* NB: caller is responsible for resetting rate control state */
#undef N
}
6349
6350#ifdef ATH_DEBUG
/*
 * Debug helper: dump the descriptor chain of a receive buffer.
 * `ix' is the buffer's position (for labeling) and `done' says
 * whether hardware has completed the descriptor, in which case
 * the rx status (ok "*" / error "!") is annotated.
 */
static void
ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
	u_int ix, int done)
{
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
	struct ath_hal *ah = sc->sc_ah;
	const struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
		       "      %08x %08x %08x %08x\n",
		    ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    !done ? "" : (rs->rs_status == 0) ? " *" : " !",
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1]);
		/*
		 * Long-form descriptors carry extra hw status words
		 * (ah_magic 0x20065416 — apparently the 5416-family HAL).
		 */
		if (ah->ah_magic == 0x20065416) {
			printf("        %08x %08x %08x %08x %08x %08x %08x\n",
			    ds->ds_hw[2], ds->ds_hw[3], ds->ds_hw[4],
			    ds->ds_hw[5], ds->ds_hw[6], ds->ds_hw[7],
			    ds->ds_hw[8]);
		}
	}
}
6376
6377static void
6378ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6379	u_int qnum, u_int ix, int done)
6380{
6381	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
6382	struct ath_hal *ah = sc->sc_ah;
6383	const struct ath_desc *ds;
6384	int i;
6385
6386	printf("Q%u[%3u]", qnum, ix);
6387	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6388		printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
6389		       "        %08x %08x %08x %08x %08x %08x\n",
6390		    ds, (const struct ath_desc *)bf->bf_daddr + i,
6391		    ds->ds_link, ds->ds_data, bf->bf_txflags,
6392		    !done ? "" : (ts->ts_status == 0) ? " *" : " !",
6393		    ds->ds_ctl0, ds->ds_ctl1,
6394		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
6395		if (ah->ah_magic == 0x20065416) {
6396			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6397			    ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
6398			    ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
6399			    ds->ds_hw[10],ds->ds_hw[11]);
6400			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6401			    ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
6402			    ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
6403			    ds->ds_hw[18], ds->ds_hw[19]);
6404		}
6405	}
6406}
6407#endif /* ATH_DEBUG */
6408
/*
 * Per-second watchdog callout.  When the tx watchdog counter
 * (sc_wd_timer) counts down to zero, query the HAL for a known
 * baseband/mac hang, log it (or a generic device timeout), and
 * reset the chip.  Always reschedules itself one second out.
 */
static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			/* low byte set => baseband hang, else mac hang */
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		ath_reset(ifp);
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}
	callout_schedule(&sc->sc_wd_ch, hz);
}
6430
6431#ifdef ATH_DIAGAPI
6432/*
6433 * Diagnostic interface to the HAL.  This is used by various
6434 * tools to do things like retrieve register contents for
6435 * debugging.  The mechanism is intentionally opaque so that
6436 * it can change frequently w/o concern for compatiblity.
6437 */
6438static int
6439ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6440{
6441	struct ath_hal *ah = sc->sc_ah;
6442	u_int id = ad->ad_id & ATH_DIAG_ID;
6443	void *indata = NULL;
6444	void *outdata = NULL;
6445	u_int32_t insize = ad->ad_in_size;
6446	u_int32_t outsize = ad->ad_out_size;
6447	int error = 0;
6448
6449	if (ad->ad_id & ATH_DIAG_IN) {
6450		/*
6451		 * Copy in data.
6452		 */
6453		indata = malloc(insize, M_TEMP, M_NOWAIT);
6454		if (indata == NULL) {
6455			error = ENOMEM;
6456			goto bad;
6457		}
6458		error = copyin(ad->ad_in_data, indata, insize);
6459		if (error)
6460			goto bad;
6461	}
6462	if (ad->ad_id & ATH_DIAG_DYN) {
6463		/*
6464		 * Allocate a buffer for the results (otherwise the HAL
6465		 * returns a pointer to a buffer where we can read the
6466		 * results).  Note that we depend on the HAL leaving this
6467		 * pointer for us to use below in reclaiming the buffer;
6468		 * may want to be more defensive.
6469		 */
6470		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6471		if (outdata == NULL) {
6472			error = ENOMEM;
6473			goto bad;
6474		}
6475	}
6476	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6477		if (outsize < ad->ad_out_size)
6478			ad->ad_out_size = outsize;
6479		if (outdata != NULL)
6480			error = copyout(outdata, ad->ad_out_data,
6481					ad->ad_out_size);
6482	} else {
6483		error = EINVAL;
6484	}
6485bad:
6486	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6487		free(indata, M_TEMP);
6488	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6489		free(outdata, M_TEMP);
6490	return error;
6491}
6492#endif /* ATH_DIAGAPI */
6493
/*
 * Network interface ioctl handler: interface flag changes, media
 * queries, driver statistics (get/zero) and, when compiled in,
 * the HAL diagnostic interface.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		/* XXX HT rates */
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		/* NB: returns directly, bypassing the common exit below */
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		/* zeroing stats is a privileged operation */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
6577
6578static int
6579ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
6580{
6581	struct ath_softc *sc = arg1;
6582	u_int slottime = ath_hal_getslottime(sc->sc_ah);
6583	int error;
6584
6585	error = sysctl_handle_int(oidp, &slottime, 0, req);
6586	if (error || !req->newptr)
6587		return error;
6588	return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
6589}
6590
6591static int
6592ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
6593{
6594	struct ath_softc *sc = arg1;
6595	u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
6596	int error;
6597
6598	error = sysctl_handle_int(oidp, &acktimeout, 0, req);
6599	if (error || !req->newptr)
6600		return error;
6601	return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
6602}
6603
6604static int
6605ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
6606{
6607	struct ath_softc *sc = arg1;
6608	u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
6609	int error;
6610
6611	error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
6612	if (error || !req->newptr)
6613		return error;
6614	return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
6615}
6616
6617static int
6618ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
6619{
6620	struct ath_softc *sc = arg1;
6621	int softled = sc->sc_softled;
6622	int error;
6623
6624	error = sysctl_handle_int(oidp, &softled, 0, req);
6625	if (error || !req->newptr)
6626		return error;
6627	softled = (softled != 0);
6628	if (softled != sc->sc_softled) {
6629		if (softled) {
6630			/* NB: handle any sc_ledpin change */
6631			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6632			    HAL_GPIO_MUX_MAC_NETWORK_LED);
6633			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6634				!sc->sc_ledon);
6635		}
6636		sc->sc_softled = softled;
6637	}
6638	return 0;
6639}
6640
6641static int
6642ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
6643{
6644	struct ath_softc *sc = arg1;
6645	int ledpin = sc->sc_ledpin;
6646	int error;
6647
6648	error = sysctl_handle_int(oidp, &ledpin, 0, req);
6649	if (error || !req->newptr)
6650		return error;
6651	if (ledpin != sc->sc_ledpin) {
6652		sc->sc_ledpin = ledpin;
6653		if (sc->sc_softled) {
6654			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6655			    HAL_GPIO_MUX_MAC_NETWORK_LED);
6656			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6657				!sc->sc_ledon);
6658		}
6659	}
6660	return 0;
6661}
6662
6663static int
6664ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
6665{
6666	struct ath_softc *sc = arg1;
6667	u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
6668	int error;
6669
6670	error = sysctl_handle_int(oidp, &txantenna, 0, req);
6671	if (!error && req->newptr) {
6672		/* XXX assumes 2 antenna ports */
6673		if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
6674			return EINVAL;
6675		ath_hal_setantennaswitch(sc->sc_ah, txantenna);
6676		/*
6677		 * NB: with the switch locked this isn't meaningful,
6678		 *     but set it anyway so things like radiotap get
6679		 *     consistent info in their data.
6680		 */
6681		sc->sc_txantenna = txantenna;
6682	}
6683	return error;
6684}
6685
6686static int
6687ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
6688{
6689	struct ath_softc *sc = arg1;
6690	u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
6691	int error;
6692
6693	error = sysctl_handle_int(oidp, &defantenna, 0, req);
6694	if (!error && req->newptr)
6695		ath_hal_setdefantenna(sc->sc_ah, defantenna);
6696	return error;
6697}
6698
6699static int
6700ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
6701{
6702	struct ath_softc *sc = arg1;
6703	u_int diversity = ath_hal_getdiversity(sc->sc_ah);
6704	int error;
6705
6706	error = sysctl_handle_int(oidp, &diversity, 0, req);
6707	if (error || !req->newptr)
6708		return error;
6709	if (!ath_hal_setdiversity(sc->sc_ah, diversity))
6710		return EINVAL;
6711	sc->sc_diversity = diversity;
6712	return 0;
6713}
6714
6715static int
6716ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
6717{
6718	struct ath_softc *sc = arg1;
6719	u_int32_t diag;
6720	int error;
6721
6722	if (!ath_hal_getdiag(sc->sc_ah, &diag))
6723		return EINVAL;
6724	error = sysctl_handle_int(oidp, &diag, 0, req);
6725	if (error || !req->newptr)
6726		return error;
6727	return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
6728}
6729
6730static int
6731ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
6732{
6733	struct ath_softc *sc = arg1;
6734	struct ifnet *ifp = sc->sc_ifp;
6735	u_int32_t scale;
6736	int error;
6737
6738	(void) ath_hal_gettpscale(sc->sc_ah, &scale);
6739	error = sysctl_handle_int(oidp, &scale, 0, req);
6740	if (error || !req->newptr)
6741		return error;
6742	return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
6743	    (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6744}
6745
6746static int
6747ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
6748{
6749	struct ath_softc *sc = arg1;
6750	u_int tpc = ath_hal_gettpc(sc->sc_ah);
6751	int error;
6752
6753	error = sysctl_handle_int(oidp, &tpc, 0, req);
6754	if (error || !req->newptr)
6755		return error;
6756	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
6757}
6758
6759static int
6760ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
6761{
6762	struct ath_softc *sc = arg1;
6763	struct ifnet *ifp = sc->sc_ifp;
6764	struct ath_hal *ah = sc->sc_ah;
6765	u_int rfkill = ath_hal_getrfkill(ah);
6766	int error;
6767
6768	error = sysctl_handle_int(oidp, &rfkill, 0, req);
6769	if (error || !req->newptr)
6770		return error;
6771	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
6772		return 0;
6773	if (!ath_hal_setrfkill(ah, rfkill))
6774		return EINVAL;
6775	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6776}
6777
6778static int
6779ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
6780{
6781	struct ath_softc *sc = arg1;
6782	u_int rfsilent;
6783	int error;
6784
6785	(void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
6786	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
6787	if (error || !req->newptr)
6788		return error;
6789	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
6790		return EINVAL;
6791	sc->sc_rfsilentpin = rfsilent & 0x1c;
6792	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
6793	return 0;
6794}
6795
6796static int
6797ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
6798{
6799	struct ath_softc *sc = arg1;
6800	u_int32_t tpack;
6801	int error;
6802
6803	(void) ath_hal_gettpack(sc->sc_ah, &tpack);
6804	error = sysctl_handle_int(oidp, &tpack, 0, req);
6805	if (error || !req->newptr)
6806		return error;
6807	return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
6808}
6809
6810static int
6811ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
6812{
6813	struct ath_softc *sc = arg1;
6814	u_int32_t tpcts;
6815	int error;
6816
6817	(void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
6818	error = sysctl_handle_int(oidp, &tpcts, 0, req);
6819	if (error || !req->newptr)
6820		return error;
6821	return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
6822}
6823
6824static int
6825ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
6826{
6827	struct ath_softc *sc = arg1;
6828	int intmit, error;
6829
6830	intmit = ath_hal_getintmit(sc->sc_ah);
6831	error = sysctl_handle_int(oidp, &intmit, 0, req);
6832	if (error || !req->newptr)
6833		return error;
6834	return !ath_hal_setintmit(sc->sc_ah, intmit) ? EINVAL : 0;
6835}
6836
6837#ifdef IEEE80211_SUPPORT_TDMA
6838static int
6839ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
6840{
6841	struct ath_softc *sc = arg1;
6842	int setcca, error;
6843
6844	setcca = sc->sc_setcca;
6845	error = sysctl_handle_int(oidp, &setcca, 0, req);
6846	if (error || !req->newptr)
6847		return error;
6848	sc->sc_setcca = (setcca != 0);
6849	return 0;
6850}
6851#endif /* IEEE80211_SUPPORT_TDMA */
6852
/*
 * Attach driver sysctl nodes beneath this device's sysctl tree.
 * Nodes for optional features (TPC, fast frames, rf silent,
 * interference mitigation, TDMA) are only created when the HAL
 * reports the corresponding h/w capability.
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	/* Read-only EEPROM-derived regulatory state. */
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLFLAG_RD, &sc->sc_eecc, 0,
		"EEPROM country code");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
		"EEPROM regdomain code");
#ifdef	ATH_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
#endif
	/* 802.11 timing knobs, all handled by proc callbacks. */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	/* LED controls. */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	/* Antenna configuration. */
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_txantenna, "I", "antenna switch");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	/* Per-packet transmit power control (capability-gated). */
	if (ath_hal_hastpc(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpack, "I", "tx power for ack frames");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpcts, "I", "tx power for cts frames");
	}
	/* Atheros fast-frames staging knobs (capability-gated). */
	if (ath_hal_hasfastframes(sc->sc_ah)) {
		sc->sc_fftxqmin = ATH_FF_TXQMIN;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0,
			"min frames before fast-frame staging");
		sc->sc_fftxqmax = ATH_FF_TXQMAX;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0,
			"max queued frames before tail drop");
	}
	if (ath_hal_hasrfsilent(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfsilent, "I", "h/w RF silent config");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
	}
	if (ath_hal_hasintmit(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_intmit, "I", "interference mitigation");
	}
	/* Default: pass decrypt/MIC-error frames up when monitoring. */
	sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
		"mask of error frames to pass when monitoring");
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA knobs; only for MAC revisions new enough to support it. */
	if (ath_hal_macversion(ah) > 0x78) {
		sc->sc_tdmadbaprep = 2;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0,
			"TDMA DBA preparation time");
		sc->sc_tdmaswbaprep = 10;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0,
			"TDMA SWBA preparation time");
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0,
			"TDMA slot guard time");
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0,
			"TDMA calculated super frame");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_setcca, "I", "enable CCA control");
	}
#endif
}
6972
6973static void
6974ath_bpfattach(struct ath_softc *sc)
6975{
6976	struct ifnet *ifp = sc->sc_ifp;
6977
6978	bpfattach(ifp, DLT_IEEE802_11_RADIO,
6979		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th));
6980	/*
6981	 * Initialize constant fields.
6982	 * XXX make header lengths a multiple of 32-bits so subsequent
6983	 *     headers are properly aligned; this is a kludge to keep
6984	 *     certain applications happy.
6985	 *
6986	 * NB: the channel is setup each time we transition to the
6987	 *     RUN state to avoid filling it in for each frame.
6988	 */
6989	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
6990	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
6991	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
6992
6993	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
6994	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
6995	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
6996}
6997
/*
 * Transmit a frame using caller-supplied (bpf injection) transmit
 * parameters: rate/tries for up to 4 multi-rate-retry series,
 * RTS/CTS protection flags, tx power and priority.  Returns 0 or
 * an errno; on the crypto failure path the mbuf is freed here.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int error, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			ath_freetx(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	/* Map the mbuf chain for DMA; this may replace m0. */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;			/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	/* Map the requested series-0 rate onto the current rate table. */
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(rt, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrix = rix;
	try0 = params->ibp_try0;
	ismrr = (params->ibp_try1 != 0);
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		/* Compute RTS/CTS protection duration by hand. */
		cix = ath_tx_findrix(rt, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
			sc->sc_hwmap[rix].ieeerate, -1);

	/* Feed a radiotap-tagged copy to any bpf listeners. */
	if (bpf_peers_present(ifp->if_bpf)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;

	/* Optional multi-rate retry series 1-3. */
	if (ismrr) {
		rix = ath_tx_findrix(rt, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(rt, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(rt, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/* NB: no buffered multicast in power save support */
	ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
	return 0;
}
7200
7201static int
7202ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
7203	const struct ieee80211_bpf_params *params)
7204{
7205	struct ieee80211com *ic = ni->ni_ic;
7206	struct ifnet *ifp = ic->ic_ifp;
7207	struct ath_softc *sc = ifp->if_softc;
7208	struct ath_buf *bf;
7209
7210	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
7211		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
7212		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
7213			"!running" : "invalid");
7214		sc->sc_stats.ast_tx_raw_fail++;
7215		ieee80211_free_node(ni);
7216		m_freem(m);
7217		return ENETDOWN;
7218	}
7219	/*
7220	 * Grab a TX buffer and associated resources.
7221	 */
7222	bf = ath_getbuf(sc);
7223	if (bf == NULL) {
7224		/* NB: ath_getbuf handles stat+msg */
7225		ieee80211_free_node(ni);
7226		m_freem(m);
7227		return ENOBUFS;
7228	}
7229
7230	ifp->if_opackets++;
7231	sc->sc_stats.ast_tx_raw++;
7232
7233	if (params == NULL) {
7234		/*
7235		 * Legacy path; interpret frame contents to decide
7236		 * precisely how to send the frame.
7237		 */
7238		if (ath_tx_start(sc, ni, bf, m))
7239			goto bad;
7240	} else {
7241		/*
7242		 * Caller supplied explicit parameters to use in
7243		 * sending the frame.
7244		 */
7245		if (ath_tx_raw_start(sc, ni, bf, m, params))
7246			goto bad;
7247	}
7248	sc->sc_wd_timer = 5;
7249
7250	return 0;
7251bad:
7252	ifp->if_oerrors++;
7253	ATH_TXBUF_LOCK(sc);
7254	STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
7255	ATH_TXBUF_UNLOCK(sc);
7256	ieee80211_free_node(ni);
7257	return EIO;		/* XXX */
7258}
7259
7260/*
7261 * Announce various information on device/driver attach.
7262 */
7263static void
7264ath_announce(struct ath_softc *sc)
7265{
7266	struct ifnet *ifp = sc->sc_ifp;
7267	struct ath_hal *ah = sc->sc_ah;
7268
7269	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
7270		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
7271		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
7272	if (bootverbose) {
7273		int i;
7274		for (i = 0; i <= WME_AC_VO; i++) {
7275			struct ath_txq *txq = sc->sc_ac2q[i];
7276			if_printf(ifp, "Use hw queue %u for %s traffic\n",
7277				txq->axq_qnum, ieee80211_wme_acnames[i]);
7278		}
7279		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
7280			sc->sc_cabq->axq_qnum);
7281		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
7282	}
7283	if (ath_rxbuf != ATH_RXBUF)
7284		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
7285	if (ath_txbuf != ATH_TXBUF)
7286		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
7287}
7288
7289#ifdef IEEE80211_SUPPORT_TDMA
/*
 * Fetch the h/w NextTBTT timer value by reading the timer
 * register directly, bypassing the HAL API.
 * NOTE(review): assumes the 0x8028 offset is valid for the
 * MACs TDMA supports (macversion > 0x78, per ath_sysctlattach)
 * — confirm per chip.
 */
static __inline uint32_t
ath_hal_getnexttbtt(struct ath_hal *ah)
{
#define	AR_TIMER0	0x8028
	return OS_REG_READ(ah, AR_TIMER0);
}
7296
/*
 * Adjust the h/w TSF by tsfdelta by rewriting the low 32 bits
 * with a read-modify-write of the TSF_L32 register.
 */
static __inline void
ath_hal_adjusttsf(struct ath_hal *ah, int32_t tsfdelta)
{
	/* XXX handle wrap/overflow */
	OS_REG_WRITE(ah, AR_TSF_L32, OS_REG_READ(ah, AR_TSF_L32) + tsfdelta);
}
7303
/*
 * Program the h/w beacon timers for TDMA operation: next TBTT,
 * beacon interval (with beacons enabled), the derived DBA/SWBA
 * preparation leads, and the ATIM window one TU after TBTT.
 * NOTE(review): the <<3 looks like a TU -> 1/8-TU conversion for
 * the DBA/SWBA timers — confirm against the HAL's conventions.
 */
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_BEACON_TIMERS bt;

	bt.bt_intval = bintval | HAL_BEACON_ENA;
	bt.bt_nexttbtt = nexttbtt;
	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
	bt.bt_nextatim = nexttbtt+1;
	ath_hal_beaconsettimers(ah, &bt);
}
7317
7318/*
7319 * Calculate the beacon interval.  This is periodic in the
7320 * superframe for the bss.  We assume each station is configured
7321 * identically wrt transmit rate so the guard time we calculate
7322 * above will be the same on all stations.  Note we need to
7323 * factor in the xmit time because the hardware will schedule
7324 * a frame for transmit if the start of the frame is within
7325 * the burst time.  When we get hardware that properly kills
7326 * frames in the PCU we can reduce/eliminate the guard time.
7327 *
7328 * Roundup to 1024 is so we have 1 TU buffer in the guard time
7329 * to deal with the granularity of the nexttbtt timer.  11n MAC's
7330 * with 1us timer granularity should allow us to reduce/eliminate
7331 * this.
7332 */
static void
ath_tdma_bintvalsetup(struct ath_softc *sc,
	const struct ieee80211_tdma_state *tdma)
{
	/* copy from vap state (XXX check all vaps have same value?) */
	sc->sc_tdmaslotlen = tdma->tdma_slotlen;
	sc->sc_tdmabintcnt = tdma->tdma_bintval;

	/*
	 * Superframe = slotcnt * (slotlen + guard), rounded up to a
	 * whole TU (1024 TSF units) per the block comment above.
	 */
	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
		tdma->tdma_slotcnt, 1024);
	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
	if (sc->sc_tdmabintval & 1)
		sc->sc_tdmabintval++;		/* NB: force an even TU count */

	if (tdma->tdma_slot == 0) {
		/*
		 * Only slot 0 beacons; other slots respond.
		 */
		sc->sc_imask |= HAL_INT_SWBA;
		sc->sc_tdmaswba = 0;		/* beacon immediately */
	} else {
		/* XXX all vaps must be slot 0 or slot !0 */
		sc->sc_imask &= ~HAL_INT_SWBA;
	}
}
7358
7359/*
7360 * Max 802.11 overhead.  This assumes no 4-address frames and
7361 * the encapsulation done by ieee80211_encap (llc).  We also
7362 * include potential crypto overhead.
7363 */
7364#define	IEEE80211_MAXOVERHEAD \
7365	(sizeof(struct ieee80211_qosframe) \
7366	 + sizeof(struct llc) \
7367	 + IEEE80211_ADDR_LEN \
7368	 + IEEE80211_WEP_IVLEN \
7369	 + IEEE80211_WEP_KIDLEN \
7370	 + IEEE80211_WEP_CRCLEN \
7371	 + IEEE80211_WEP_MICLEN \
7372	 + IEEE80211_CRC_LEN)
7373
7374/*
7375 * Setup initially for tdma operation.  Start the beacon
7376 * timers and enable SWBA if we are slot 0.  Otherwise
7377 * we wait for slot 0 to arrive so we can sync up before
7378 * starting to transmit.
7379 */
static void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_txparam *tp;
	const struct ieee80211_tdma_state *tdma = NULL;
	int rix;

	if (vap == NULL) {
		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
		if (vap == NULL) {
			if_printf(ifp, "%s: no vaps?\n", __func__);
			return;
		}
	}
	tp = vap->iv_bss->ni_txparms;
	/*
	 * Calculate the guard time for each slot.  This is the
	 * time to send a maximal-size frame according to the
	 * fixed/lowest transmit rate.  Note that the interface
	 * mtu does not include the 802.11 overhead so we must
	 * tack that on (ath_hal_computetxtime includes the
	 * preamble and plcp in its calculation).
	 */
	tdma = vap->iv_tdma;
	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rix = ath_tx_findrix(sc->sc_currates, tp->ucastrate);
	else
		rix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
	/* XXX short preamble assumed */
	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);

	/* Block interrupts while the beacon machinery is rewritten. */
	ath_hal_intrset(ah, 0);

	ath_beaconq_config(sc);			/* setup h/w beacon q */
	if (sc->sc_setcca)
		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
	/* NB: HAL_BEACON_RESET_TSF so slot timing starts from scratch */
	ath_tdma_settimers(sc, sc->sc_tdmabintval,
		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
	sc->sc_syncbeacon = 0;

	/* Reset the tsf-delta averages maintained by ath_tdma_update. */
	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;

	ath_hal_intrset(ah, sc->sc_imask);

	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
	    sc->sc_tdmadbaprep);
}
7436
7437/*
7438 * Update tdma operation.  Called from the 802.11 layer
7439 * when a beacon is received from the TDMA station operating
7440 * in the slot immediately preceding us in the bss.  Use
7441 * the rx timestamp for the beacon frame to update our
7442 * beacon timers so we follow their schedule.  Note that
7443 * by using the rx timestamp we implicitly include the
7444 * propagation delay in our schedule.
7445 */
static void
ath_tdma_update(struct ieee80211_node *ni,
	const struct ieee80211_tdma_param *tdma, int changed)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int64_t tsf, rstamp, nextslot;
	u_int32_t txtime, nextslottu, timer0;
	int32_t tudelta, tsfdelta;
	const struct ath_rx_status *rs;
	int rix;

	sc->sc_stats.ast_tdma_update++;

	/*
	 * Check for and adopt configuration changes.
	 */
	if (changed != 0) {
		const struct ieee80211_tdma_state *ts = vap->iv_tdma;

		ath_tdma_bintvalsetup(sc, ts);

		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: adopt slot %u slotcnt %u slotlen %u us "
		    "bintval %u TU\n", __func__,
		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
		    sc->sc_tdmabintval);

		ath_beaconq_config(sc);
		/* XXX right? */
		ath_hal_intrset(ah, sc->sc_imask);
		/* NB: beacon timers programmed below */
	}

	/* extend rx timestamp to 64 bits */
	tsf = ath_hal_gettsf64(ah);
	rstamp = ath_extend_tsf(ni->ni_rstamp, tsf);
	/*
	 * The rx timestamp is set by the hardware on completing
	 * reception (at the point where the rx descriptor is DMA'd
	 * to the host).  To find the start of our next slot we
	 * must adjust this time by the time required to send
	 * the packet just received.
	 */
	rs = sc->sc_tdmars;
	rix = rt->rateCodeToIndex[rs->rs_rate];
	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
	    rt->info[rix].shortPreamble);
	/* NB: << 9 is to cvt to TU and /2 */
	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;

	/*
	 * TIMER0 is the h/w's idea of NextTBTT (in TU's).  Convert
	 * to usecs and calculate the difference between what the
	 * other station thinks and what we have programmed.  This
	 * lets us figure how to adjust our timers to match.  The
	 * adjustments are done by pulling the TSF forward and possibly
	 * rewriting the beacon timers.
	 */
	timer0 = ath_hal_getnexttbtt(ah);
	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD+1)) - TU_TO_TSF(timer0));

	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));

	/* Track the signed delta in two one-sided moving averages. */
	if (tsfdelta < 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
		tsfdelta = -tsfdelta % 1024;
		nextslottu++;
	} else if (tsfdelta > 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
		tsfdelta = 1024 - (tsfdelta % 1024);
		nextslottu++;
	} else {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
	}
	tudelta = nextslottu - timer0;

	/*
	 * Copy sender's timestamp into tdma ie so they can
	 * calculate roundtrip time.  We submit a beacon frame
	 * below after any timer adjustment.  The frame goes out
	 * at the next TBTT so the sender can calculate the
	 * roundtrip by inspecting the tdma ie in our beacon frame.
	 *
	 * NB: This tstamp is subtly preserved when
	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
	 *     slot position changes) because ieee80211_add_tdma
	 *     skips over the data.
	 */
	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
		&ni->ni_tstamp.data, 8);
#if 0
	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u timer0 %u (%d)\n",
	    (unsigned long long) tsf, (unsigned long long) nextslot,
	    (int)(nextslot - tsf), tsfdelta,
	    nextslottu, timer0, tudelta);
#endif
	/*
	 * Adjust the beacon timers only when pulling them forward
	 * or when going back by less than the beacon interval.
	 * Negative jumps larger than the beacon interval seem to
	 * cause the timers to stop and generally cause instability.
	 * This basically filters out jumps due to missed beacons.
	 */
	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
		sc->sc_stats.ast_tdma_timers++;
	}
	if (tsfdelta > 0) {
		ath_hal_adjusttsf(ah, tsfdelta);
		sc->sc_stats.ast_tdma_tsf++;
	}
	ath_tdma_beacon_send(sc, vap);		/* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}
7576
7577/*
7578 * Transmit a beacon frame at SWBA.  Dynamic updates
7579 * to the frame contents are done as needed.
7580 */
static void
ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	int otherant;

	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	/* Build the beacon frame; NULL means nothing to send now. */
	bf = ath_beacon_generate(sc, vap);
	if (bf != NULL) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
			/* NB: the HAL still stops DMA, so proceed */
		}
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;		/* XXX per-vap? */

		/*
		 * Record local TSF for our last send for use
		 * in arbitrating slot collisions.
		 */
		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
	}
}
7649#endif /* IEEE80211_SUPPORT_TDMA */
7650