if_iwn.c revision 281480
1/*-
2 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
3 * Copyright (c) 2011 Intel Corporation
4 * Copyright (c) 2007-2009
5 *	Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2008
7 *	Benjamin Close <benjsc@FreeBSD.org>
8 * Copyright (c) 2008 Sam Leffler, Errno Consulting
9 *
10 * Permission to use, copy, modify, and distribute this software for any
11 * purpose with or without fee is hereby granted, provided that the above
12 * copyright notice and this permission notice appear in all copies.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 */
22
23/*
24 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
25 * adapters.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/10/sys/dev/iwn/if_iwn.c 281480 2015-04-13 01:01:17Z eadler $");
30
31#include "opt_wlan.h"
32#include "opt_iwn.h"
33
34#include <sys/param.h>
35#include <sys/sockio.h>
36#include <sys/sysctl.h>
37#include <sys/mbuf.h>
38#include <sys/kernel.h>
39#include <sys/socket.h>
40#include <sys/systm.h>
41#include <sys/malloc.h>
42#include <sys/bus.h>
43#include <sys/rman.h>
44#include <sys/endian.h>
45#include <sys/firmware.h>
46#include <sys/limits.h>
47#include <sys/module.h>
48#include <sys/queue.h>
49#include <sys/taskqueue.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53#include <machine/clock.h>
54
55#include <dev/pci/pcireg.h>
56#include <dev/pci/pcivar.h>
57
58#include <net/bpf.h>
59#include <net/if.h>
60#include <net/if_arp.h>
61#include <net/ethernet.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65
66#include <netinet/in.h>
67#include <netinet/in_systm.h>
68#include <netinet/in_var.h>
69#include <netinet/if_ether.h>
70#include <netinet/ip.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_radiotap.h>
74#include <net80211/ieee80211_regdomain.h>
75#include <net80211/ieee80211_ratectl.h>
76
77#include <dev/iwn/if_iwnreg.h>
78#include <dev/iwn/if_iwnvar.h>
79#include <dev/iwn/if_iwn_devid.h>
80
81struct iwn_ident {
82	uint16_t	vendor;
83	uint16_t	device;
84	const char	*name;
85};
86
87static const struct iwn_ident iwn_ident_table[] = {
88	{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205"		},
89	{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000"		},
90	{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000"		},
91	{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205"		},
92	{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250"	},
93	{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250"	},
94	{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030"		},
95	{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030"		},
96	{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230"		},
97	{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230"		},
98	{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150"	},
99	{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150"	},
100	{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230"		},
101	{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230"		},
102	{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130"		},
103	{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130"		},
104	{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100"		},
105	{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100"		},
106	{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965"		},
107	{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300"		},
108	{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200"		},
109	{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965"		},
110	{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965"		},
111	{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100"			},
112	{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965"		},
113	{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300"		},
114	{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300"		},
115	{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100"			},
116	{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300"		},
117	{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200"		},
118	{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350"			},
119	{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350"			},
120	{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150"			},
121	{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150"			},
122	{ 0, 0, NULL }
123};
124
125static int	iwn_probe(device_t);
126static int	iwn_attach(device_t);
127static int	iwn4965_attach(struct iwn_softc *, uint16_t);
128static int	iwn5000_attach(struct iwn_softc *, uint16_t);
129static void	iwn_radiotap_attach(struct iwn_softc *);
130static void	iwn_sysctlattach(struct iwn_softc *);
131static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
132		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
133		    const uint8_t [IEEE80211_ADDR_LEN],
134		    const uint8_t [IEEE80211_ADDR_LEN]);
135static void	iwn_vap_delete(struct ieee80211vap *);
136static int	iwn_detach(device_t);
137static int	iwn_shutdown(device_t);
138static int	iwn_suspend(device_t);
139static int	iwn_resume(device_t);
140static int	iwn_nic_lock(struct iwn_softc *);
141static int	iwn_eeprom_lock(struct iwn_softc *);
142static int	iwn_init_otprom(struct iwn_softc *);
143static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
144static void	iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
145static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
146		    void **, bus_size_t, bus_size_t);
147static void	iwn_dma_contig_free(struct iwn_dma_info *);
148static int	iwn_alloc_sched(struct iwn_softc *);
149static void	iwn_free_sched(struct iwn_softc *);
150static int	iwn_alloc_kw(struct iwn_softc *);
151static void	iwn_free_kw(struct iwn_softc *);
152static int	iwn_alloc_ict(struct iwn_softc *);
153static void	iwn_free_ict(struct iwn_softc *);
154static int	iwn_alloc_fwmem(struct iwn_softc *);
155static void	iwn_free_fwmem(struct iwn_softc *);
156static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
157static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
158static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
159static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
160		    int);
161static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
162static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
163static void	iwn5000_ict_reset(struct iwn_softc *);
164static int	iwn_read_eeprom(struct iwn_softc *,
165		    uint8_t macaddr[IEEE80211_ADDR_LEN]);
166static void	iwn4965_read_eeprom(struct iwn_softc *);
167#ifdef	IWN_DEBUG
168static void	iwn4965_print_power_group(struct iwn_softc *, int);
169#endif
170static void	iwn5000_read_eeprom(struct iwn_softc *);
171static uint32_t	iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
172static void	iwn_read_eeprom_band(struct iwn_softc *, int);
173static void	iwn_read_eeprom_ht40(struct iwn_softc *, int);
174static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
175static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
176		    struct ieee80211_channel *);
177static int	iwn_setregdomain(struct ieee80211com *,
178		    struct ieee80211_regdomain *, int,
179		    struct ieee80211_channel[]);
180static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
181static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
182		    const uint8_t mac[IEEE80211_ADDR_LEN]);
183static void	iwn_newassoc(struct ieee80211_node *, int);
184static int	iwn_media_change(struct ifnet *);
185static int	iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
186static void	iwn_calib_timeout(void *);
187static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
188		    struct iwn_rx_data *);
189static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
190		    struct iwn_rx_data *);
191static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
192		    struct iwn_rx_data *);
193static void	iwn5000_rx_calib_results(struct iwn_softc *,
194		    struct iwn_rx_desc *, struct iwn_rx_data *);
195static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
196		    struct iwn_rx_data *);
197static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
198		    struct iwn_rx_data *);
199static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
200		    struct iwn_rx_data *);
201static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
202		    uint8_t);
203static void	iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
204static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
205static void	iwn_notif_intr(struct iwn_softc *);
206static void	iwn_wakeup_intr(struct iwn_softc *);
207static void	iwn_rftoggle_intr(struct iwn_softc *);
208static void	iwn_fatal_intr(struct iwn_softc *);
209static void	iwn_intr(void *);
210static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
211		    uint16_t);
212static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
213		    uint16_t);
214#ifdef notyet
215static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
216#endif
217static int	iwn_tx_data(struct iwn_softc *, struct mbuf *,
218		    struct ieee80211_node *);
219static int	iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
220		    struct ieee80211_node *,
221		    const struct ieee80211_bpf_params *params);
222static int	iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
223		    const struct ieee80211_bpf_params *);
224static void	iwn_start(struct ifnet *);
225static void	iwn_start_locked(struct ifnet *);
226static void	iwn_watchdog(void *);
227static int	iwn_ioctl(struct ifnet *, u_long, caddr_t);
228static int	iwn_cmd(struct iwn_softc *, int, const void *, int, int);
229static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
230		    int);
231static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
232		    int);
233static int	iwn_set_link_quality(struct iwn_softc *,
234		    struct ieee80211_node *);
235static int	iwn_add_broadcast_node(struct iwn_softc *, int);
236static int	iwn_updateedca(struct ieee80211com *);
237static void	iwn_update_mcast(struct ifnet *);
238static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
239static int	iwn_set_critical_temp(struct iwn_softc *);
240static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
241static void	iwn4965_power_calibration(struct iwn_softc *, int);
242static int	iwn4965_set_txpower(struct iwn_softc *,
243		    struct ieee80211_channel *, int);
244static int	iwn5000_set_txpower(struct iwn_softc *,
245		    struct ieee80211_channel *, int);
246static int	iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
247static int	iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
248static int	iwn_get_noise(const struct iwn_rx_general_stats *);
249static int	iwn4965_get_temperature(struct iwn_softc *);
250static int	iwn5000_get_temperature(struct iwn_softc *);
251static int	iwn_init_sensitivity(struct iwn_softc *);
252static void	iwn_collect_noise(struct iwn_softc *,
253		    const struct iwn_rx_general_stats *);
254static int	iwn4965_init_gains(struct iwn_softc *);
255static int	iwn5000_init_gains(struct iwn_softc *);
256static int	iwn4965_set_gains(struct iwn_softc *);
257static int	iwn5000_set_gains(struct iwn_softc *);
258static void	iwn_tune_sensitivity(struct iwn_softc *,
259		    const struct iwn_rx_stats *);
260static int	iwn_send_sensitivity(struct iwn_softc *);
261static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
262static int	iwn_send_btcoex(struct iwn_softc *);
263static int	iwn_send_advanced_btcoex(struct iwn_softc *);
264static int	iwn5000_runtime_calib(struct iwn_softc *);
265static int	iwn_config(struct iwn_softc *);
266static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
267static int	iwn_scan(struct iwn_softc *);
268static int	iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
269static int	iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
270static int	iwn_ampdu_rx_start(struct ieee80211_node *,
271		    struct ieee80211_rx_ampdu *, int, int, int);
272static void	iwn_ampdu_rx_stop(struct ieee80211_node *,
273		    struct ieee80211_rx_ampdu *);
274static int	iwn_addba_request(struct ieee80211_node *,
275		    struct ieee80211_tx_ampdu *, int, int, int);
276static int	iwn_addba_response(struct ieee80211_node *,
277		    struct ieee80211_tx_ampdu *, int, int, int);
278static int	iwn_ampdu_tx_start(struct ieee80211com *,
279		    struct ieee80211_node *, uint8_t);
280static void	iwn_ampdu_tx_stop(struct ieee80211_node *,
281		    struct ieee80211_tx_ampdu *);
282static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
283		    struct ieee80211_node *, int, uint8_t, uint16_t);
284static void	iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
285		    uint8_t, uint16_t);
286static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
287		    struct ieee80211_node *, int, uint8_t, uint16_t);
288static void	iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
289		    uint8_t, uint16_t);
290static int	iwn5000_query_calibration(struct iwn_softc *);
291static int	iwn5000_send_calibration(struct iwn_softc *);
292static int	iwn5000_send_wimax_coex(struct iwn_softc *);
293static int	iwn5000_crystal_calib(struct iwn_softc *);
294static int	iwn5000_temp_offset_calib(struct iwn_softc *);
295static int	iwn4965_post_alive(struct iwn_softc *);
296static int	iwn5000_post_alive(struct iwn_softc *);
297static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
298		    int);
299static int	iwn4965_load_firmware(struct iwn_softc *);
300static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
301		    const uint8_t *, int);
302static int	iwn5000_load_firmware(struct iwn_softc *);
303static int	iwn_read_firmware_leg(struct iwn_softc *,
304		    struct iwn_fw_info *);
305static int	iwn_read_firmware_tlv(struct iwn_softc *,
306		    struct iwn_fw_info *, uint16_t);
307static int	iwn_read_firmware(struct iwn_softc *);
308static int	iwn_clock_wait(struct iwn_softc *);
309static int	iwn_apm_init(struct iwn_softc *);
310static void	iwn_apm_stop_master(struct iwn_softc *);
311static void	iwn_apm_stop(struct iwn_softc *);
312static int	iwn4965_nic_config(struct iwn_softc *);
313static int	iwn5000_nic_config(struct iwn_softc *);
314static int	iwn_hw_prepare(struct iwn_softc *);
315static int	iwn_hw_init(struct iwn_softc *);
316static void	iwn_hw_stop(struct iwn_softc *);
317static void	iwn_radio_on(void *, int);
318static void	iwn_radio_off(void *, int);
319static void	iwn_init_locked(struct iwn_softc *);
320static void	iwn_init(void *);
321static void	iwn_stop_locked(struct iwn_softc *);
322static void	iwn_stop(struct iwn_softc *);
323static void	iwn_scan_start(struct ieee80211com *);
324static void	iwn_scan_end(struct ieee80211com *);
325static void	iwn_set_channel(struct ieee80211com *);
326static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
327static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
328static void	iwn_hw_reset(void *, int);
329#ifdef	IWN_DEBUG
330static char	*iwn_get_csr_string(int);
331static void	iwn_debug_register(struct iwn_softc *);
332#endif
333
334#ifdef	IWN_DEBUG
335enum {
336	IWN_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
337	IWN_DEBUG_RECV		= 0x00000002,	/* basic recv operation */
338	IWN_DEBUG_STATE		= 0x00000004,	/* 802.11 state transitions */
339	IWN_DEBUG_TXPOW		= 0x00000008,	/* tx power processing */
340	IWN_DEBUG_RESET		= 0x00000010,	/* reset processing */
341	IWN_DEBUG_OPS		= 0x00000020,	/* iwn_ops processing */
342	IWN_DEBUG_BEACON 	= 0x00000040,	/* beacon handling */
343	IWN_DEBUG_WATCHDOG 	= 0x00000080,	/* watchdog timeout */
344	IWN_DEBUG_INTR		= 0x00000100,	/* ISR */
345	IWN_DEBUG_CALIBRATE	= 0x00000200,	/* periodic calibration */
346	IWN_DEBUG_NODE		= 0x00000400,	/* node management */
347	IWN_DEBUG_LED		= 0x00000800,	/* led management */
348	IWN_DEBUG_CMD		= 0x00001000,	/* cmd submission */
349	IWN_DEBUG_TXRATE	= 0x00002000,	/* TX rate debugging */
350	IWN_DEBUG_PWRSAVE	= 0x00004000,	/* Power save operations */
351	IWN_DEBUG_REGISTER	= 0x20000000,	/* print chipset register */
352	IWN_DEBUG_TRACE		= 0x40000000,	/* Print begin and start driver function */
353	IWN_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
354	IWN_DEBUG_ANY		= 0xffffffff
355};
356
357#define DPRINTF(sc, m, fmt, ...) do {			\
358	if (sc->sc_debug & (m))				\
359		printf(fmt, __VA_ARGS__);		\
360} while (0)
361
362static const char *
363iwn_intr_str(uint8_t cmd)
364{
365	switch (cmd) {
366	/* Notifications */
367	case IWN_UC_READY:		return "UC_READY";
368	case IWN_ADD_NODE_DONE:		return "ADD_NODE_DONE";
369	case IWN_TX_DONE:		return "TX_DONE";
370	case IWN_START_SCAN:		return "START_SCAN";
371	case IWN_STOP_SCAN:		return "STOP_SCAN";
372	case IWN_RX_STATISTICS:		return "RX_STATS";
373	case IWN_BEACON_STATISTICS:	return "BEACON_STATS";
374	case IWN_STATE_CHANGED:		return "STATE_CHANGED";
375	case IWN_BEACON_MISSED:		return "BEACON_MISSED";
376	case IWN_RX_PHY:		return "RX_PHY";
377	case IWN_MPDU_RX_DONE:		return "MPDU_RX_DONE";
378	case IWN_RX_DONE:		return "RX_DONE";
379
380	/* Command Notifications */
381	case IWN_CMD_RXON:		return "IWN_CMD_RXON";
382	case IWN_CMD_RXON_ASSOC:	return "IWN_CMD_RXON_ASSOC";
383	case IWN_CMD_EDCA_PARAMS:	return "IWN_CMD_EDCA_PARAMS";
384	case IWN_CMD_TIMING:		return "IWN_CMD_TIMING";
385	case IWN_CMD_LINK_QUALITY:	return "IWN_CMD_LINK_QUALITY";
386	case IWN_CMD_SET_LED:		return "IWN_CMD_SET_LED";
387	case IWN5000_CMD_WIMAX_COEX:	return "IWN5000_CMD_WIMAX_COEX";
388	case IWN5000_CMD_CALIB_CONFIG:	return "IWN5000_CMD_CALIB_CONFIG";
389	case IWN5000_CMD_CALIB_RESULT:	return "IWN5000_CMD_CALIB_RESULT";
390	case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
391	case IWN_CMD_SET_POWER_MODE:	return "IWN_CMD_SET_POWER_MODE";
392	case IWN_CMD_SCAN:		return "IWN_CMD_SCAN";
393	case IWN_CMD_SCAN_RESULTS:	return "IWN_CMD_SCAN_RESULTS";
394	case IWN_CMD_TXPOWER:		return "IWN_CMD_TXPOWER";
395	case IWN_CMD_TXPOWER_DBM:	return "IWN_CMD_TXPOWER_DBM";
396	case IWN5000_CMD_TX_ANT_CONFIG:	return "IWN5000_CMD_TX_ANT_CONFIG";
397	case IWN_CMD_BT_COEX:		return "IWN_CMD_BT_COEX";
398	case IWN_CMD_SET_CRITICAL_TEMP:	return "IWN_CMD_SET_CRITICAL_TEMP";
399	case IWN_CMD_SET_SENSITIVITY:	return "IWN_CMD_SET_SENSITIVITY";
400	case IWN_CMD_PHY_CALIB:		return "IWN_CMD_PHY_CALIB";
401	}
402	return "UNKNOWN INTR NOTIF/CMD";
403}
404#else
405#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
406#endif
407
408static device_method_t iwn_methods[] = {
409	/* Device interface */
410	DEVMETHOD(device_probe,		iwn_probe),
411	DEVMETHOD(device_attach,	iwn_attach),
412	DEVMETHOD(device_detach,	iwn_detach),
413	DEVMETHOD(device_shutdown,	iwn_shutdown),
414	DEVMETHOD(device_suspend,	iwn_suspend),
415	DEVMETHOD(device_resume,	iwn_resume),
416
417	DEVMETHOD_END
418};
419
420static driver_t iwn_driver = {
421	"iwn",
422	iwn_methods,
423	sizeof(struct iwn_softc)
424};
425static devclass_t iwn_devclass;
426
427DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
428
429MODULE_VERSION(iwn, 1);
430
431MODULE_DEPEND(iwn, firmware, 1, 1, 1);
432MODULE_DEPEND(iwn, pci, 1, 1, 1);
433MODULE_DEPEND(iwn, wlan, 1, 1, 1);
434
435static int
436iwn_probe(device_t dev)
437{
438	const struct iwn_ident *ident;
439
440	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
441		if (pci_get_vendor(dev) == ident->vendor &&
442		    pci_get_device(dev) == ident->device) {
443			device_set_desc(dev, ident->name);
444			return (BUS_PROBE_DEFAULT);
445		}
446	}
447	return ENXIO;
448}
449
450static int
451iwn_attach(device_t dev)
452{
453	struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
454	struct ieee80211com *ic;
455	struct ifnet *ifp;
456	int i, error, rid;
457	uint8_t macaddr[IEEE80211_ADDR_LEN];
458
459	sc->sc_dev = dev;
460
461#ifdef	IWN_DEBUG
462	error = resource_int_value(device_get_name(sc->sc_dev),
463	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
464	if (error != 0)
465		sc->sc_debug = 0;
466#else
467	sc->sc_debug = 0;
468#endif
469
470	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
471
472	/*
473	 * Get the offset of the PCI Express Capability Structure in PCI
474	 * Configuration Space.
475	 */
476	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
477	if (error != 0) {
478		device_printf(dev, "PCIe capability structure not found!\n");
479		return error;
480	}
481
482	/* Clear device-specific "PCI retry timeout" register (41h). */
483	pci_write_config(dev, 0x41, 0, 1);
484
485	/* Enable bus-mastering. */
486	pci_enable_busmaster(dev);
487
488	rid = PCIR_BAR(0);
489	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
490	    RF_ACTIVE);
491	if (sc->mem == NULL) {
492		device_printf(dev, "can't map mem space\n");
493		error = ENOMEM;
494		return error;
495	}
496	sc->sc_st = rman_get_bustag(sc->mem);
497	sc->sc_sh = rman_get_bushandle(sc->mem);
498
499	i = 1;
500	rid = 0;
501	if (pci_alloc_msi(dev, &i) == 0)
502		rid = 1;
503	/* Install interrupt handler. */
504	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
505	    (rid != 0 ? 0 : RF_SHAREABLE));
506	if (sc->irq == NULL) {
507		device_printf(dev, "can't map interrupt\n");
508		error = ENOMEM;
509		goto fail;
510	}
511
512	IWN_LOCK_INIT(sc);
513
514	/* Read hardware revision and attach. */
515	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
516	    & IWN_HW_REV_TYPE_MASK;
517	sc->subdevice_id = pci_get_subdevice(dev);
518	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
519		error = iwn4965_attach(sc, pci_get_device(dev));
520	else
521		error = iwn5000_attach(sc, pci_get_device(dev));
522	if (error != 0) {
523		device_printf(dev, "could not attach device, error %d\n",
524		    error);
525		goto fail;
526	}
527
528	if ((error = iwn_hw_prepare(sc)) != 0) {
529		device_printf(dev, "hardware not ready, error %d\n", error);
530		goto fail;
531	}
532
533	/* Allocate DMA memory for firmware transfers. */
534	if ((error = iwn_alloc_fwmem(sc)) != 0) {
535		device_printf(dev,
536		    "could not allocate memory for firmware, error %d\n",
537		    error);
538		goto fail;
539	}
540
541	/* Allocate "Keep Warm" page. */
542	if ((error = iwn_alloc_kw(sc)) != 0) {
543		device_printf(dev,
544		    "could not allocate keep warm page, error %d\n", error);
545		goto fail;
546	}
547
548	/* Allocate ICT table for 5000 Series. */
549	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
550	    (error = iwn_alloc_ict(sc)) != 0) {
551		device_printf(dev, "could not allocate ICT table, error %d\n",
552		    error);
553		goto fail;
554	}
555
556	/* Allocate TX scheduler "rings". */
557	if ((error = iwn_alloc_sched(sc)) != 0) {
558		device_printf(dev,
559		    "could not allocate TX scheduler rings, error %d\n", error);
560		goto fail;
561	}
562
563	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
564	for (i = 0; i < sc->ntxqs; i++) {
565		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
566			device_printf(dev,
567			    "could not allocate TX ring %d, error %d\n", i,
568			    error);
569			goto fail;
570		}
571	}
572
573	/* Allocate RX ring. */
574	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
575		device_printf(dev, "could not allocate RX ring, error %d\n",
576		    error);
577		goto fail;
578	}
579
580	/* Clear pending interrupts. */
581	IWN_WRITE(sc, IWN_INT, 0xffffffff);
582
583	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
584	if (ifp == NULL) {
585		device_printf(dev, "can not allocate ifnet structure\n");
586		goto fail;
587	}
588
589	ic = ifp->if_l2com;
590	ic->ic_ifp = ifp;
591	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
592	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
593
594	/* Set device capabilities. */
595	ic->ic_caps =
596		  IEEE80211_C_STA		/* station mode supported */
597		| IEEE80211_C_MONITOR		/* monitor mode supported */
598		| IEEE80211_C_BGSCAN		/* background scanning */
599		| IEEE80211_C_TXPMGT		/* tx power management */
600		| IEEE80211_C_SHSLOT		/* short slot time supported */
601		| IEEE80211_C_WPA
602		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
603#if 0
604		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
605#endif
606		| IEEE80211_C_WME		/* WME */
607		| IEEE80211_C_PMGT		/* Station-side power mgmt */
608		;
609
610	/* Read MAC address, channels, etc from EEPROM. */
611	if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
612		device_printf(dev, "could not read EEPROM, error %d\n",
613		    error);
614		goto fail;
615	}
616
617	/* Count the number of available chains. */
618	sc->ntxchains =
619	    ((sc->txchainmask >> 2) & 1) +
620	    ((sc->txchainmask >> 1) & 1) +
621	    ((sc->txchainmask >> 0) & 1);
622	sc->nrxchains =
623	    ((sc->rxchainmask >> 2) & 1) +
624	    ((sc->rxchainmask >> 1) & 1) +
625	    ((sc->rxchainmask >> 0) & 1);
626	if (bootverbose) {
627		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
628		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
629		    macaddr, ":");
630	}
631
632	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
633		ic->ic_rxstream = sc->nrxchains;
634		ic->ic_txstream = sc->ntxchains;
635
636		/*
637		 * The NICs we currently support cap out at 2x2 support
638		 * separate from the chains being used.
639		 *
640		 * This is a total hack to work around that until some
641		 * per-device method is implemented to return the
642		 * actual stream support.
643		 */
644		if (ic->ic_rxstream > 2)
645			ic->ic_rxstream = 2;
646		if (ic->ic_txstream > 2)
647			ic->ic_txstream = 2;
648
649		ic->ic_htcaps =
650			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
651			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
652			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
653			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
654#ifdef notyet
655			| IEEE80211_HTCAP_GREENFIELD
656#if IWN_RBUF_SIZE == 8192
657			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
658#else
659			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
660#endif
661#endif
662			/* s/w capabilities */
663			| IEEE80211_HTC_HT		/* HT operation */
664			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
665#ifdef notyet
666			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
667#endif
668			;
669	}
670
671	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
672	ifp->if_softc = sc;
673	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
674	ifp->if_init = iwn_init;
675	ifp->if_ioctl = iwn_ioctl;
676	ifp->if_start = iwn_start;
677	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
678	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
679	IFQ_SET_READY(&ifp->if_snd);
680
681	ieee80211_ifattach(ic, macaddr);
682	ic->ic_vap_create = iwn_vap_create;
683	ic->ic_vap_delete = iwn_vap_delete;
684	ic->ic_raw_xmit = iwn_raw_xmit;
685	ic->ic_node_alloc = iwn_node_alloc;
686	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
687	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
688	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
689	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
690	sc->sc_addba_request = ic->ic_addba_request;
691	ic->ic_addba_request = iwn_addba_request;
692	sc->sc_addba_response = ic->ic_addba_response;
693	ic->ic_addba_response = iwn_addba_response;
694	sc->sc_addba_stop = ic->ic_addba_stop;
695	ic->ic_addba_stop = iwn_ampdu_tx_stop;
696	ic->ic_newassoc = iwn_newassoc;
697	ic->ic_wme.wme_update = iwn_updateedca;
698	ic->ic_update_mcast = iwn_update_mcast;
699	ic->ic_scan_start = iwn_scan_start;
700	ic->ic_scan_end = iwn_scan_end;
701	ic->ic_set_channel = iwn_set_channel;
702	ic->ic_scan_curchan = iwn_scan_curchan;
703	ic->ic_scan_mindwell = iwn_scan_mindwell;
704	ic->ic_setregdomain = iwn_setregdomain;
705
706	iwn_radiotap_attach(sc);
707
708	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
709	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
710	TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
711	TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
712	TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
713
714	iwn_sysctlattach(sc);
715
716	/*
717	 * Hook our interrupt after all initialization is complete.
718	 */
719	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
720	    NULL, iwn_intr, sc, &sc->sc_ih);
721	if (error != 0) {
722		device_printf(dev, "can't establish interrupt, error %d\n",
723		    error);
724		goto fail;
725	}
726
727	if (bootverbose)
728		ieee80211_announce(ic);
729	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
730	return 0;
731fail:
732	iwn_detach(dev);
733	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
734	return error;
735}
736
737static int
738iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
739{
740	struct iwn_ops *ops = &sc->ops;
741
742	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
743	ops->load_firmware = iwn4965_load_firmware;
744	ops->read_eeprom = iwn4965_read_eeprom;
745	ops->post_alive = iwn4965_post_alive;
746	ops->nic_config = iwn4965_nic_config;
747	ops->update_sched = iwn4965_update_sched;
748	ops->get_temperature = iwn4965_get_temperature;
749	ops->get_rssi = iwn4965_get_rssi;
750	ops->set_txpower = iwn4965_set_txpower;
751	ops->init_gains = iwn4965_init_gains;
752	ops->set_gains = iwn4965_set_gains;
753	ops->add_node = iwn4965_add_node;
754	ops->tx_done = iwn4965_tx_done;
755	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
756	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
757	sc->ntxqs = IWN4965_NTXQUEUES;
758	sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
759	sc->ndmachnls = IWN4965_NDMACHNLS;
760	sc->broadcast_id = IWN4965_ID_BROADCAST;
761	sc->rxonsz = IWN4965_RXONSZ;
762	sc->schedsz = IWN4965_SCHEDSZ;
763	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
764	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
765	sc->fwsz = IWN4965_FWSZ;
766	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
767	sc->limits = &iwn4965_sensitivity_limits;
768	sc->fwname = "iwn4965fw";
769	/* Override chains masks, ROM is known to be broken. */
770	sc->txchainmask = IWN_ANT_AB;
771	sc->rxchainmask = IWN_ANT_ABC;
772
773	DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
774
775	return 0;
776}
777
778static int
779iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
780{
781	struct iwn_ops *ops = &sc->ops;
782
783	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
784
785	ops->load_firmware = iwn5000_load_firmware;
786	ops->read_eeprom = iwn5000_read_eeprom;
787	ops->post_alive = iwn5000_post_alive;
788	ops->nic_config = iwn5000_nic_config;
789	ops->update_sched = iwn5000_update_sched;
790	ops->get_temperature = iwn5000_get_temperature;
791	ops->get_rssi = iwn5000_get_rssi;
792	ops->set_txpower = iwn5000_set_txpower;
793	ops->init_gains = iwn5000_init_gains;
794	ops->set_gains = iwn5000_set_gains;
795	ops->add_node = iwn5000_add_node;
796	ops->tx_done = iwn5000_tx_done;
797	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
798	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
799	sc->ntxqs = IWN5000_NTXQUEUES;
800	sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
801	sc->ndmachnls = IWN5000_NDMACHNLS;
802	sc->broadcast_id = IWN5000_ID_BROADCAST;
803	sc->rxonsz = IWN5000_RXONSZ;
804	sc->schedsz = IWN5000_SCHEDSZ;
805	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
806	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
807	sc->fwsz = IWN5000_FWSZ;
808	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
809	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
810	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
811
812	switch (sc->hw_type) {
813	case IWN_HW_REV_TYPE_5100:
814		sc->limits = &iwn5000_sensitivity_limits;
815		sc->fwname = "iwn5000fw";
816		/* Override chains masks, ROM is known to be broken. */
817		sc->txchainmask = IWN_ANT_B;
818		sc->rxchainmask = IWN_ANT_AB;
819		break;
820	case IWN_HW_REV_TYPE_5150:
821		sc->limits = &iwn5150_sensitivity_limits;
822		sc->fwname = "iwn5150fw";
823		break;
824	case IWN_HW_REV_TYPE_5300:
825	case IWN_HW_REV_TYPE_5350:
826		sc->limits = &iwn5000_sensitivity_limits;
827		sc->fwname = "iwn5000fw";
828		break;
829	case IWN_HW_REV_TYPE_1000:
830		sc->limits = &iwn1000_sensitivity_limits;
831		sc->fwname = "iwn1000fw";
832		break;
833	case IWN_HW_REV_TYPE_6000:
834		sc->limits = &iwn6000_sensitivity_limits;
835		sc->fwname = "iwn6000fw";
836		if (pid == 0x422c || pid == 0x4239) {
837			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
838			/* Override chains masks, ROM is known to be broken. */
839			sc->txchainmask = IWN_ANT_BC;
840			sc->rxchainmask = IWN_ANT_BC;
841		}
842		break;
843	case IWN_HW_REV_TYPE_6050:
844		sc->limits = &iwn6000_sensitivity_limits;
845		sc->fwname = "iwn6050fw";
846		/* Override chains masks, ROM is known to be broken. */
847		sc->txchainmask = IWN_ANT_AB;
848		sc->rxchainmask = IWN_ANT_AB;
849		break;
850	case IWN_HW_REV_TYPE_6005:
851		sc->limits = &iwn6000_sensitivity_limits;
852		if (pid != 0x0082 && pid != 0x0085) {
853			sc->fwname = "iwn6000g2bfw";
854			sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
855		} else
856			sc->fwname = "iwn6000g2afw";
857		break;
858	default:
859		device_printf(sc->sc_dev, "adapter type %d not supported\n",
860		    sc->hw_type);
861		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
862		return ENOTSUP;
863	}
864	return 0;
865}
866
867/*
868 * Attach the interface to 802.11 radiotap.
869 */
870static void
871iwn_radiotap_attach(struct iwn_softc *sc)
872{
873	struct ifnet *ifp = sc->sc_ifp;
874	struct ieee80211com *ic = ifp->if_l2com;
875	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
876	ieee80211_radiotap_attach(ic,
877	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
878		IWN_TX_RADIOTAP_PRESENT,
879	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
880		IWN_RX_RADIOTAP_PRESENT);
881	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
882}
883
884static void
885iwn_sysctlattach(struct iwn_softc *sc)
886{
887#ifdef	IWN_DEBUG
888	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
889	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
890
891	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
892	    "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
893		"control debugging printfs");
894#endif
895}
896
897static struct ieee80211vap *
898iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
899    enum ieee80211_opmode opmode, int flags,
900    const uint8_t bssid[IEEE80211_ADDR_LEN],
901    const uint8_t mac[IEEE80211_ADDR_LEN])
902{
903	struct iwn_vap *ivp;
904	struct ieee80211vap *vap;
905	uint8_t mac1[IEEE80211_ADDR_LEN];
906	struct iwn_softc *sc = ic->ic_ifp->if_softc;
907
908	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
909		return NULL;
910
911	IEEE80211_ADDR_COPY(mac1, mac);
912
913	ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
914	    M_80211_VAP, M_NOWAIT | M_ZERO);
915	if (ivp == NULL)
916		return NULL;
917	vap = &ivp->iv_vap;
918	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
919	ivp->ctx = IWN_RXON_BSS_CTX;
920	IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
921	vap->iv_bmissthreshold = 10;		/* override default */
922	/* Override with driver methods. */
923	ivp->iv_newstate = vap->iv_newstate;
924	vap->iv_newstate = iwn_newstate;
925	sc->ivap[IWN_RXON_BSS_CTX] = vap;
926
927	ieee80211_ratectl_init(vap);
928	/* Complete setup. */
929	ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
930	ic->ic_opmode = opmode;
931	return vap;
932}
933
934static void
935iwn_vap_delete(struct ieee80211vap *vap)
936{
937	struct iwn_vap *ivp = IWN_VAP(vap);
938
939	ieee80211_ratectl_deinit(vap);
940	ieee80211_vap_detach(vap);
941	free(ivp, M_80211_VAP);
942}
943
944static int
945iwn_detach(device_t dev)
946{
947	struct iwn_softc *sc = device_get_softc(dev);
948	struct ifnet *ifp = sc->sc_ifp;
949	struct ieee80211com *ic;
950	int qid;
951
952	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
953
954	if (ifp != NULL) {
955		ic = ifp->if_l2com;
956
957		ieee80211_draintask(ic, &sc->sc_reinit_task);
958		ieee80211_draintask(ic, &sc->sc_radioon_task);
959		ieee80211_draintask(ic, &sc->sc_radiooff_task);
960
961		iwn_stop(sc);
962		callout_drain(&sc->watchdog_to);
963		callout_drain(&sc->calib_to);
964		ieee80211_ifdetach(ic);
965	}
966
967	/* Uninstall interrupt handler. */
968	if (sc->irq != NULL) {
969		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
970		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
971		    sc->irq);
972		pci_release_msi(dev);
973	}
974
975	/* Free DMA resources. */
976	iwn_free_rx_ring(sc, &sc->rxq);
977	for (qid = 0; qid < sc->ntxqs; qid++)
978		iwn_free_tx_ring(sc, &sc->txq[qid]);
979	iwn_free_sched(sc);
980	iwn_free_kw(sc);
981	if (sc->ict != NULL)
982		iwn_free_ict(sc);
983	iwn_free_fwmem(sc);
984
985	if (sc->mem != NULL)
986		bus_release_resource(dev, SYS_RES_MEMORY,
987		    rman_get_rid(sc->mem), sc->mem);
988
989	if (ifp != NULL)
990		if_free(ifp);
991
992	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
993	IWN_LOCK_DESTROY(sc);
994	return 0;
995}
996
997static int
998iwn_shutdown(device_t dev)
999{
1000	struct iwn_softc *sc = device_get_softc(dev);
1001
1002	iwn_stop(sc);
1003	return 0;
1004}
1005
1006static int
1007iwn_suspend(device_t dev)
1008{
1009	struct iwn_softc *sc = device_get_softc(dev);
1010	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1011
1012	ieee80211_suspend_all(ic);
1013	return 0;
1014}
1015
1016static int
1017iwn_resume(device_t dev)
1018{
1019	struct iwn_softc *sc = device_get_softc(dev);
1020	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1021
1022	/* Clear device-specific "PCI retry timeout" register (41h). */
1023	pci_write_config(dev, 0x41, 0, 1);
1024
1025	ieee80211_resume_all(ic);
1026	return 0;
1027}
1028
1029static int
1030iwn_nic_lock(struct iwn_softc *sc)
1031{
1032	int ntries;
1033
1034	/* Request exclusive access to NIC. */
1035	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1036
1037	/* Spin until we actually get the lock. */
1038	for (ntries = 0; ntries < 1000; ntries++) {
1039		if ((IWN_READ(sc, IWN_GP_CNTRL) &
1040		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1041		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
1042			return 0;
1043		DELAY(10);
1044	}
1045	return ETIMEDOUT;
1046}
1047
1048static __inline void
1049iwn_nic_unlock(struct iwn_softc *sc)
1050{
1051	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1052}
1053
1054static __inline uint32_t
1055iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1056{
1057	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1058	IWN_BARRIER_READ_WRITE(sc);
1059	return IWN_READ(sc, IWN_PRPH_RDATA);
1060}
1061
1062static __inline void
1063iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1064{
1065	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1066	IWN_BARRIER_WRITE(sc);
1067	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1068}
1069
1070static __inline void
1071iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1072{
1073	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1074}
1075
1076static __inline void
1077iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1078{
1079	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1080}
1081
1082static __inline void
1083iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1084    const uint32_t *data, int count)
1085{
1086	for (; count > 0; count--, data++, addr += 4)
1087		iwn_prph_write(sc, addr, *data);
1088}
1089
1090static __inline uint32_t
1091iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1092{
1093	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1094	IWN_BARRIER_READ_WRITE(sc);
1095	return IWN_READ(sc, IWN_MEM_RDATA);
1096}
1097
1098static __inline void
1099iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1100{
1101	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1102	IWN_BARRIER_WRITE(sc);
1103	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1104}
1105
1106static __inline void
1107iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1108{
1109	uint32_t tmp;
1110
1111	tmp = iwn_mem_read(sc, addr & ~3);
1112	if (addr & 3)
1113		tmp = (tmp & 0x0000ffff) | data << 16;
1114	else
1115		tmp = (tmp & 0xffff0000) | data;
1116	iwn_mem_write(sc, addr & ~3, tmp);
1117}
1118
1119static __inline void
1120iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1121    int count)
1122{
1123	for (; count > 0; count--, addr += 4)
1124		*data++ = iwn_mem_read(sc, addr);
1125}
1126
1127static __inline void
1128iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1129    int count)
1130{
1131	for (; count > 0; count--, addr += 4)
1132		iwn_mem_write(sc, addr, val);
1133}
1134
1135static int
1136iwn_eeprom_lock(struct iwn_softc *sc)
1137{
1138	int i, ntries;
1139
1140	for (i = 0; i < 100; i++) {
1141		/* Request exclusive access to EEPROM. */
1142		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1143		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1144
1145		/* Spin until we actually get the lock. */
1146		for (ntries = 0; ntries < 100; ntries++) {
1147			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1148			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1149				return 0;
1150			DELAY(10);
1151		}
1152	}
1153	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1154	return ETIMEDOUT;
1155}
1156
1157static __inline void
1158iwn_eeprom_unlock(struct iwn_softc *sc)
1159{
1160	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1161}
1162
1163/*
1164 * Initialize access by host to One Time Programmable ROM.
1165 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1166 */
1167static int
1168iwn_init_otprom(struct iwn_softc *sc)
1169{
1170	uint16_t prev, base, next;
1171	int count, error;
1172
1173	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1174
1175	/* Wait for clock stabilization before accessing prph. */
1176	if ((error = iwn_clock_wait(sc)) != 0)
1177		return error;
1178
1179	if ((error = iwn_nic_lock(sc)) != 0)
1180		return error;
1181	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1182	DELAY(5);
1183	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1184	iwn_nic_unlock(sc);
1185
1186	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1187	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1188		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1189		    IWN_RESET_LINK_PWR_MGMT_DIS);
1190	}
1191	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1192	/* Clear ECC status. */
1193	IWN_SETBITS(sc, IWN_OTP_GP,
1194	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1195
1196	/*
1197	 * Find the block before last block (contains the EEPROM image)
1198	 * for HW without OTP shadow RAM.
1199	 */
1200	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1201		/* Switch to absolute addressing mode. */
1202		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1203		base = prev = 0;
1204		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1205			error = iwn_read_prom_data(sc, base, &next, 2);
1206			if (error != 0)
1207				return error;
1208			if (next == 0)	/* End of linked-list. */
1209				break;
1210			prev = base;
1211			base = le16toh(next);
1212		}
1213		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1214			return EIO;
1215		/* Skip "next" word. */
1216		sc->prom_base = prev + 1;
1217	}
1218
1219	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1220
1221	return 0;
1222}
1223
1224static int
1225iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1226{
1227	uint8_t *out = data;
1228	uint32_t val, tmp;
1229	int ntries;
1230
1231	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1232
1233	addr += sc->prom_base;
1234	for (; count > 0; count -= 2, addr++) {
1235		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1236		for (ntries = 0; ntries < 10; ntries++) {
1237			val = IWN_READ(sc, IWN_EEPROM);
1238			if (val & IWN_EEPROM_READ_VALID)
1239				break;
1240			DELAY(5);
1241		}
1242		if (ntries == 10) {
1243			device_printf(sc->sc_dev,
1244			    "timeout reading ROM at 0x%x\n", addr);
1245			return ETIMEDOUT;
1246		}
1247		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1248			/* OTPROM, check for ECC errors. */
1249			tmp = IWN_READ(sc, IWN_OTP_GP);
1250			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1251				device_printf(sc->sc_dev,
1252				    "OTPROM ECC error at 0x%x\n", addr);
1253				return EIO;
1254			}
1255			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1256				/* Correctable ECC error, clear bit. */
1257				IWN_SETBITS(sc, IWN_OTP_GP,
1258				    IWN_OTP_GP_ECC_CORR_STTS);
1259			}
1260		}
1261		*out++ = val >> 16;
1262		if (count > 1)
1263			*out++ = val >> 24;
1264	}
1265
1266	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1267
1268	return 0;
1269}
1270
1271static void
1272iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1273{
1274	if (error != 0)
1275		return;
1276	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1277	*(bus_addr_t *)arg = segs[0].ds_addr;
1278}
1279
1280static int
1281iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1282    void **kvap, bus_size_t size, bus_size_t alignment)
1283{
1284	int error;
1285
1286	dma->tag = NULL;
1287	dma->size = size;
1288
1289	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1290	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1291	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1292	if (error != 0)
1293		goto fail;
1294
1295	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1296	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1297	if (error != 0)
1298		goto fail;
1299
1300	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1301	    iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1302	if (error != 0)
1303		goto fail;
1304
1305	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1306
1307	if (kvap != NULL)
1308		*kvap = dma->vaddr;
1309
1310	return 0;
1311
1312fail:	iwn_dma_contig_free(dma);
1313	return error;
1314}
1315
1316static void
1317iwn_dma_contig_free(struct iwn_dma_info *dma)
1318{
1319	if (dma->map != NULL) {
1320		if (dma->vaddr != NULL) {
1321			bus_dmamap_sync(dma->tag, dma->map,
1322			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1323			bus_dmamap_unload(dma->tag, dma->map);
1324			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1325			dma->vaddr = NULL;
1326		}
1327		bus_dmamap_destroy(dma->tag, dma->map);
1328		dma->map = NULL;
1329	}
1330	if (dma->tag != NULL) {
1331		bus_dma_tag_destroy(dma->tag);
1332		dma->tag = NULL;
1333	}
1334}
1335
1336static int
1337iwn_alloc_sched(struct iwn_softc *sc)
1338{
1339	/* TX scheduler rings must be aligned on a 1KB boundary. */
1340	return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1341	    sc->schedsz, 1024);
1342}
1343
1344static void
1345iwn_free_sched(struct iwn_softc *sc)
1346{
1347	iwn_dma_contig_free(&sc->sched_dma);
1348}
1349
1350static int
1351iwn_alloc_kw(struct iwn_softc *sc)
1352{
1353	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1354	return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1355}
1356
1357static void
1358iwn_free_kw(struct iwn_softc *sc)
1359{
1360	iwn_dma_contig_free(&sc->kw_dma);
1361}
1362
1363static int
1364iwn_alloc_ict(struct iwn_softc *sc)
1365{
1366	/* ICT table must be aligned on a 4KB boundary. */
1367	return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1368	    IWN_ICT_SIZE, 4096);
1369}
1370
1371static void
1372iwn_free_ict(struct iwn_softc *sc)
1373{
1374	iwn_dma_contig_free(&sc->ict_dma);
1375}
1376
1377static int
1378iwn_alloc_fwmem(struct iwn_softc *sc)
1379{
1380	/* Must be aligned on a 16-byte boundary. */
1381	return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1382}
1383
1384static void
1385iwn_free_fwmem(struct iwn_softc *sc)
1386{
1387	iwn_dma_contig_free(&sc->fw_dma);
1388}
1389
1390static int
1391iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1392{
1393	bus_size_t size;
1394	int i, error;
1395
1396	ring->cur = 0;
1397
1398	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1399
1400	/* Allocate RX descriptors (256-byte aligned). */
1401	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1402	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1403	    size, 256);
1404	if (error != 0) {
1405		device_printf(sc->sc_dev,
1406		    "%s: could not allocate RX ring DMA memory, error %d\n",
1407		    __func__, error);
1408		goto fail;
1409	}
1410
1411	/* Allocate RX status area (16-byte aligned). */
1412	error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1413	    sizeof (struct iwn_rx_status), 16);
1414	if (error != 0) {
1415		device_printf(sc->sc_dev,
1416		    "%s: could not allocate RX status DMA memory, error %d\n",
1417		    __func__, error);
1418		goto fail;
1419	}
1420
1421	/* Create RX buffer DMA tag. */
1422	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1423	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1424	    IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1425	    &ring->data_dmat);
1426	if (error != 0) {
1427		device_printf(sc->sc_dev,
1428		    "%s: could not create RX buf DMA tag, error %d\n",
1429		    __func__, error);
1430		goto fail;
1431	}
1432
1433	/*
1434	 * Allocate and map RX buffers.
1435	 */
1436	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1437		struct iwn_rx_data *data = &ring->data[i];
1438		bus_addr_t paddr;
1439
1440		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1441		if (error != 0) {
1442			device_printf(sc->sc_dev,
1443			    "%s: could not create RX buf DMA map, error %d\n",
1444			    __func__, error);
1445			goto fail;
1446		}
1447
1448		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1449		    IWN_RBUF_SIZE);
1450		if (data->m == NULL) {
1451			device_printf(sc->sc_dev,
1452			    "%s: could not allocate RX mbuf\n", __func__);
1453			error = ENOBUFS;
1454			goto fail;
1455		}
1456
1457		error = bus_dmamap_load(ring->data_dmat, data->map,
1458		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1459		    &paddr, BUS_DMA_NOWAIT);
1460		if (error != 0 && error != EFBIG) {
1461			device_printf(sc->sc_dev,
1462			    "%s: can't not map mbuf, error %d\n", __func__,
1463			    error);
1464			goto fail;
1465		}
1466
1467		/* Set physical address of RX buffer (256-byte aligned). */
1468		ring->desc[i] = htole32(paddr >> 8);
1469	}
1470
1471	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1472	    BUS_DMASYNC_PREWRITE);
1473
1474	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1475
1476	return 0;
1477
1478fail:	iwn_free_rx_ring(sc, ring);
1479
1480	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1481
1482	return error;
1483}
1484
1485static void
1486iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1487{
1488	int ntries;
1489
1490	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1491
1492	if (iwn_nic_lock(sc) == 0) {
1493		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1494		for (ntries = 0; ntries < 1000; ntries++) {
1495			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1496			    IWN_FH_RX_STATUS_IDLE)
1497				break;
1498			DELAY(10);
1499		}
1500		iwn_nic_unlock(sc);
1501	}
1502	ring->cur = 0;
1503	sc->last_rx_valid = 0;
1504}
1505
1506static void
1507iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1508{
1509	int i;
1510
1511	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1512
1513	iwn_dma_contig_free(&ring->desc_dma);
1514	iwn_dma_contig_free(&ring->stat_dma);
1515
1516	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1517		struct iwn_rx_data *data = &ring->data[i];
1518
1519		if (data->m != NULL) {
1520			bus_dmamap_sync(ring->data_dmat, data->map,
1521			    BUS_DMASYNC_POSTREAD);
1522			bus_dmamap_unload(ring->data_dmat, data->map);
1523			m_freem(data->m);
1524			data->m = NULL;
1525		}
1526		if (data->map != NULL)
1527			bus_dmamap_destroy(ring->data_dmat, data->map);
1528	}
1529	if (ring->data_dmat != NULL) {
1530		bus_dma_tag_destroy(ring->data_dmat);
1531		ring->data_dmat = NULL;
1532	}
1533}
1534
1535static int
1536iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1537{
1538	bus_addr_t paddr;
1539	bus_size_t size;
1540	int i, error;
1541
1542	ring->qid = qid;
1543	ring->queued = 0;
1544	ring->cur = 0;
1545
1546	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1547
1548	/* Allocate TX descriptors (256-byte aligned). */
1549	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1550	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1551	    size, 256);
1552	if (error != 0) {
1553		device_printf(sc->sc_dev,
1554		    "%s: could not allocate TX ring DMA memory, error %d\n",
1555		    __func__, error);
1556		goto fail;
1557	}
1558
1559	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1560	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1561	    size, 4);
1562	if (error != 0) {
1563		device_printf(sc->sc_dev,
1564		    "%s: could not allocate TX cmd DMA memory, error %d\n",
1565		    __func__, error);
1566		goto fail;
1567	}
1568
1569	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1570	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1571	    IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1572	    &ring->data_dmat);
1573	if (error != 0) {
1574		device_printf(sc->sc_dev,
1575		    "%s: could not create TX buf DMA tag, error %d\n",
1576		    __func__, error);
1577		goto fail;
1578	}
1579
1580	paddr = ring->cmd_dma.paddr;
1581	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1582		struct iwn_tx_data *data = &ring->data[i];
1583
1584		data->cmd_paddr = paddr;
1585		data->scratch_paddr = paddr + 12;
1586		paddr += sizeof (struct iwn_tx_cmd);
1587
1588		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1589		if (error != 0) {
1590			device_printf(sc->sc_dev,
1591			    "%s: could not create TX buf DMA map, error %d\n",
1592			    __func__, error);
1593			goto fail;
1594		}
1595	}
1596
1597	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1598
1599	return 0;
1600
1601fail:	iwn_free_tx_ring(sc, ring);
1602	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
1603	return error;
1604}
1605
1606static void
1607iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1608{
1609	int i;
1610
1611	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
1612
1613	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1614		struct iwn_tx_data *data = &ring->data[i];
1615
1616		if (data->m != NULL) {
1617			bus_dmamap_sync(ring->data_dmat, data->map,
1618			    BUS_DMASYNC_POSTWRITE);
1619			bus_dmamap_unload(ring->data_dmat, data->map);
1620			m_freem(data->m);
1621			data->m = NULL;
1622		}
1623	}
1624	/* Clear TX descriptors. */
1625	memset(ring->desc, 0, ring->desc_dma.size);
1626	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1627	    BUS_DMASYNC_PREWRITE);
1628	sc->qfullmsk &= ~(1 << ring->qid);
1629	ring->queued = 0;
1630	ring->cur = 0;
1631}
1632
1633static void
1634iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1635{
1636	int i;
1637
1638	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1639
1640	iwn_dma_contig_free(&ring->desc_dma);
1641	iwn_dma_contig_free(&ring->cmd_dma);
1642
1643	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1644		struct iwn_tx_data *data = &ring->data[i];
1645
1646		if (data->m != NULL) {
1647			bus_dmamap_sync(ring->data_dmat, data->map,
1648			    BUS_DMASYNC_POSTWRITE);
1649			bus_dmamap_unload(ring->data_dmat, data->map);
1650			m_freem(data->m);
1651		}
1652		if (data->map != NULL)
1653			bus_dmamap_destroy(ring->data_dmat, data->map);
1654	}
1655	if (ring->data_dmat != NULL) {
1656		bus_dma_tag_destroy(ring->data_dmat);
1657		ring->data_dmat = NULL;
1658	}
1659}
1660
1661static void
1662iwn5000_ict_reset(struct iwn_softc *sc)
1663{
1664	/* Disable interrupts. */
1665	IWN_WRITE(sc, IWN_INT_MASK, 0);
1666
1667	/* Reset ICT table. */
1668	memset(sc->ict, 0, IWN_ICT_SIZE);
1669	sc->ict_cur = 0;
1670
1671	/* Set physical address of ICT table (4KB aligned). */
1672	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1673	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1674	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1675
1676	/* Enable periodic RX interrupt. */
1677	sc->int_mask |= IWN_INT_RX_PERIODIC;
1678	/* Switch to ICT interrupt mode in driver. */
1679	sc->sc_flags |= IWN_FLAG_USE_ICT;
1680
1681	/* Re-enable interrupts. */
1682	IWN_WRITE(sc, IWN_INT, 0xffffffff);
1683	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1684}
1685
1686static int
1687iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1688{
1689	struct iwn_ops *ops = &sc->ops;
1690	uint16_t val;
1691	int error;
1692
1693	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1694
1695	/* Check whether adapter has an EEPROM or an OTPROM. */
1696	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1697	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1698		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1699	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1700	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1701
1702	/* Adapter has to be powered on for EEPROM access to work. */
1703	if ((error = iwn_apm_init(sc)) != 0) {
1704		device_printf(sc->sc_dev,
1705		    "%s: could not power ON adapter, error %d\n", __func__,
1706		    error);
1707		return error;
1708	}
1709
1710	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1711		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1712		return EIO;
1713	}
1714	if ((error = iwn_eeprom_lock(sc)) != 0) {
1715		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1716		    __func__, error);
1717		return error;
1718	}
1719	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1720		if ((error = iwn_init_otprom(sc)) != 0) {
1721			device_printf(sc->sc_dev,
1722			    "%s: could not initialize OTPROM, error %d\n",
1723			    __func__, error);
1724			return error;
1725		}
1726	}
1727
1728	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1729	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1730	/* Check if HT support is bonded out. */
1731	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1732		sc->sc_flags |= IWN_FLAG_HAS_11N;
1733
1734	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1735	sc->rfcfg = le16toh(val);
1736	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1737	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
1738	if (sc->txchainmask == 0)
1739		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1740	if (sc->rxchainmask == 0)
1741		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1742
1743	/* Read MAC address. */
1744	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1745
1746	/* Read adapter-specific information from EEPROM. */
1747	ops->read_eeprom(sc);
1748
1749	iwn_apm_stop(sc);	/* Power OFF adapter. */
1750
1751	iwn_eeprom_unlock(sc);
1752
1753	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1754
1755	return 0;
1756}
1757
1758static void
1759iwn4965_read_eeprom(struct iwn_softc *sc)
1760{
1761	uint32_t addr;
1762	uint16_t val;
1763	int i;
1764
1765	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1766
1767	/* Read regulatory domain (4 ASCII characters). */
1768	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1769
1770	/* Read the list of authorized channels (20MHz ones only). */
1771	for (i = 0; i < 7; i++) {
1772		addr = iwn4965_regulatory_bands[i];
1773		iwn_read_eeprom_channels(sc, i, addr);
1774	}
1775
1776	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1777	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1778	sc->maxpwr2GHz = val & 0xff;
1779	sc->maxpwr5GHz = val >> 8;
1780	/* Check that EEPROM values are within valid range. */
1781	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1782		sc->maxpwr5GHz = 38;
1783	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1784		sc->maxpwr2GHz = 38;
1785	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1786	    sc->maxpwr2GHz, sc->maxpwr5GHz);
1787
1788	/* Read samples for each TX power group. */
1789	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1790	    sizeof sc->bands);
1791
1792	/* Read voltage at which samples were taken. */
1793	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1794	sc->eeprom_voltage = (int16_t)le16toh(val);
1795	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1796	    sc->eeprom_voltage);
1797
1798#ifdef IWN_DEBUG
1799	/* Print samples. */
1800	if (sc->sc_debug & IWN_DEBUG_ANY) {
1801		for (i = 0; i < IWN_NBANDS; i++)
1802			iwn4965_print_power_group(sc, i);
1803	}
1804#endif
1805
1806	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1807}
1808
1809#ifdef IWN_DEBUG
1810static void
1811iwn4965_print_power_group(struct iwn_softc *sc, int i)
1812{
1813	struct iwn4965_eeprom_band *band = &sc->bands[i];
1814	struct iwn4965_eeprom_chan_samples *chans = band->chans;
1815	int j, c;
1816
1817	printf("===band %d===\n", i);
1818	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1819	printf("chan1 num=%d\n", chans[0].num);
1820	for (c = 0; c < 2; c++) {
1821		for (j = 0; j < IWN_NSAMPLES; j++) {
1822			printf("chain %d, sample %d: temp=%d gain=%d "
1823			    "power=%d pa_det=%d\n", c, j,
1824			    chans[0].samples[c][j].temp,
1825			    chans[0].samples[c][j].gain,
1826			    chans[0].samples[c][j].power,
1827			    chans[0].samples[c][j].pa_det);
1828		}
1829	}
1830	printf("chan2 num=%d\n", chans[1].num);
1831	for (c = 0; c < 2; c++) {
1832		for (j = 0; j < IWN_NSAMPLES; j++) {
1833			printf("chain %d, sample %d: temp=%d gain=%d "
1834			    "power=%d pa_det=%d\n", c, j,
1835			    chans[1].samples[c][j].temp,
1836			    chans[1].samples[c][j].gain,
1837			    chans[1].samples[c][j].power,
1838			    chans[1].samples[c][j].pa_det);
1839		}
1840	}
1841}
1842#endif
1843
1844static void
1845iwn5000_read_eeprom(struct iwn_softc *sc)
1846{
1847	struct iwn5000_eeprom_calib_hdr hdr;
1848	int32_t volt;
1849	uint32_t base, addr;
1850	uint16_t val;
1851	int i;
1852
1853	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1854
1855	/* Read regulatory domain (4 ASCII characters). */
1856	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1857	base = le16toh(val);
1858	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1859	    sc->eeprom_domain, 4);
1860
1861	/* Read the list of authorized channels (20MHz ones only). */
1862	for (i = 0; i < 7; i++) {
1863		if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1864			addr = base + iwn6000_regulatory_bands[i];
1865		else
1866			addr = base + iwn5000_regulatory_bands[i];
1867		iwn_read_eeprom_channels(sc, i, addr);
1868	}
1869
1870	/* Read enhanced TX power information for 6000 Series. */
1871	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1872		iwn_read_eeprom_enhinfo(sc);
1873
1874	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1875	base = le16toh(val);
1876	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1877	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1878	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1879	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
1880	sc->calib_ver = hdr.version;
1881
1882	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1883		/* Compute temperature offset. */
1884		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1885		sc->eeprom_temp = le16toh(val);
1886		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1887		volt = le16toh(val);
1888		sc->temp_off = sc->eeprom_temp - (volt / -5);
1889		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1890		    sc->eeprom_temp, volt, sc->temp_off);
1891	} else {
1892		/* Read crystal calibration. */
1893		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1894		    &sc->eeprom_crystal, sizeof (uint32_t));
1895		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1896		    le32toh(sc->eeprom_crystal));
1897	}
1898
1899	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1900
1901}
1902
1903/*
1904 * Translate EEPROM flags to net80211.
1905 */
1906static uint32_t
1907iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1908{
1909	uint32_t nflags;
1910
1911	nflags = 0;
1912	if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1913		nflags |= IEEE80211_CHAN_PASSIVE;
1914	if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1915		nflags |= IEEE80211_CHAN_NOADHOC;
1916	if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1917		nflags |= IEEE80211_CHAN_DFS;
1918		/* XXX apparently IBSS may still be marked */
1919		nflags |= IEEE80211_CHAN_NOADHOC;
1920	}
1921
1922	return nflags;
1923}
1924
1925static void
1926iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1927{
1928	struct ifnet *ifp = sc->sc_ifp;
1929	struct ieee80211com *ic = ifp->if_l2com;
1930	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1931	const struct iwn_chan_band *band = &iwn_bands[n];
1932	struct ieee80211_channel *c;
1933	uint8_t chan;
1934	int i, nflags;
1935
1936	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1937
1938	for (i = 0; i < band->nchan; i++) {
1939		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1940			DPRINTF(sc, IWN_DEBUG_RESET,
1941			    "skip chan %d flags 0x%x maxpwr %d\n",
1942			    band->chan[i], channels[i].flags,
1943			    channels[i].maxpwr);
1944			continue;
1945		}
1946		chan = band->chan[i];
1947		nflags = iwn_eeprom_channel_flags(&channels[i]);
1948
1949		c = &ic->ic_channels[ic->ic_nchans++];
1950		c->ic_ieee = chan;
1951		c->ic_maxregpower = channels[i].maxpwr;
1952		c->ic_maxpower = 2*c->ic_maxregpower;
1953
1954		if (n == 0) {	/* 2GHz band */
1955			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1956			/* G =>'s B is supported */
1957			c->ic_flags = IEEE80211_CHAN_B | nflags;
1958			c = &ic->ic_channels[ic->ic_nchans++];
1959			c[0] = c[-1];
1960			c->ic_flags = IEEE80211_CHAN_G | nflags;
1961		} else {	/* 5GHz band */
1962			c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1963			c->ic_flags = IEEE80211_CHAN_A | nflags;
1964		}
1965
1966		/* Save maximum allowed TX power for this channel. */
1967		sc->maxpwr[chan] = channels[i].maxpwr;
1968
1969		DPRINTF(sc, IWN_DEBUG_RESET,
1970		    "add chan %d flags 0x%x maxpwr %d\n", chan,
1971		    channels[i].flags, channels[i].maxpwr);
1972
1973		if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1974			/* add HT20, HT40 added separately */
1975			c = &ic->ic_channels[ic->ic_nchans++];
1976			c[0] = c[-1];
1977			c->ic_flags |= IEEE80211_CHAN_HT20;
1978		}
1979	}
1980
1981	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1982
1983}
1984
1985static void
1986iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1987{
1988	struct ifnet *ifp = sc->sc_ifp;
1989	struct ieee80211com *ic = ifp->if_l2com;
1990	struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1991	const struct iwn_chan_band *band = &iwn_bands[n];
1992	struct ieee80211_channel *c, *cent, *extc;
1993	uint8_t chan;
1994	int i, nflags;
1995
1996	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
1997
1998	if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
1999		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2000		return;
2001	}
2002
2003	for (i = 0; i < band->nchan; i++) {
2004		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2005			DPRINTF(sc, IWN_DEBUG_RESET,
2006			    "skip chan %d flags 0x%x maxpwr %d\n",
2007			    band->chan[i], channels[i].flags,
2008			    channels[i].maxpwr);
2009			continue;
2010		}
2011		chan = band->chan[i];
2012		nflags = iwn_eeprom_channel_flags(&channels[i]);
2013
2014		/*
2015		 * Each entry defines an HT40 channel pair; find the
2016		 * center channel, then the extension channel above.
2017		 */
2018		cent = ieee80211_find_channel_byieee(ic, chan,
2019		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2020		if (cent == NULL) {	/* XXX shouldn't happen */
2021			device_printf(sc->sc_dev,
2022			    "%s: no entry for channel %d\n", __func__, chan);
2023			continue;
2024		}
2025		extc = ieee80211_find_channel(ic, cent->ic_freq+20,
2026		    (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2027		if (extc == NULL) {
2028			DPRINTF(sc, IWN_DEBUG_RESET,
2029			    "%s: skip chan %d, extension channel not found\n",
2030			    __func__, chan);
2031			continue;
2032		}
2033
2034		DPRINTF(sc, IWN_DEBUG_RESET,
2035		    "add ht40 chan %d flags 0x%x maxpwr %d\n",
2036		    chan, channels[i].flags, channels[i].maxpwr);
2037
2038		c = &ic->ic_channels[ic->ic_nchans++];
2039		c[0] = cent[0];
2040		c->ic_extieee = extc->ic_ieee;
2041		c->ic_flags &= ~IEEE80211_CHAN_HT;
2042		c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
2043		c = &ic->ic_channels[ic->ic_nchans++];
2044		c[0] = extc[0];
2045		c->ic_extieee = cent->ic_ieee;
2046		c->ic_flags &= ~IEEE80211_CHAN_HT;
2047		c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
2048	}
2049
2050	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2051
2052}
2053
2054static void
2055iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2056{
2057	struct ifnet *ifp = sc->sc_ifp;
2058	struct ieee80211com *ic = ifp->if_l2com;
2059
2060	iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2061	    iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2062
2063	if (n < 5)
2064		iwn_read_eeprom_band(sc, n);
2065	else
2066		iwn_read_eeprom_ht40(sc, n);
2067	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2068}
2069
2070static struct iwn_eeprom_chan *
2071iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2072{
2073	int band, chan, i, j;
2074
2075	if (IEEE80211_IS_CHAN_HT40(c)) {
2076		band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2077		if (IEEE80211_IS_CHAN_HT40D(c))
2078			chan = c->ic_extieee;
2079		else
2080			chan = c->ic_ieee;
2081		for (i = 0; i < iwn_bands[band].nchan; i++) {
2082			if (iwn_bands[band].chan[i] == chan)
2083				return &sc->eeprom_channels[band][i];
2084		}
2085	} else {
2086		for (j = 0; j < 5; j++) {
2087			for (i = 0; i < iwn_bands[j].nchan; i++) {
2088				if (iwn_bands[j].chan[i] == c->ic_ieee)
2089					return &sc->eeprom_channels[j][i];
2090			}
2091		}
2092	}
2093	return NULL;
2094}
2095
2096/*
2097 * Enforce flags read from EEPROM.
2098 */
2099static int
2100iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2101    int nchan, struct ieee80211_channel chans[])
2102{
2103	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2104	int i;
2105
2106	for (i = 0; i < nchan; i++) {
2107		struct ieee80211_channel *c = &chans[i];
2108		struct iwn_eeprom_chan *channel;
2109
2110		channel = iwn_find_eeprom_channel(sc, c);
2111		if (channel == NULL) {
2112			if_printf(ic->ic_ifp,
2113			    "%s: invalid channel %u freq %u/0x%x\n",
2114			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2115			return EINVAL;
2116		}
2117		c->ic_flags |= iwn_eeprom_channel_flags(channel);
2118	}
2119
2120	return 0;
2121}
2122
2123static void
2124iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2125{
2126	struct iwn_eeprom_enhinfo enhinfo[35];
2127	struct ifnet *ifp = sc->sc_ifp;
2128	struct ieee80211com *ic = ifp->if_l2com;
2129	struct ieee80211_channel *c;
2130	uint16_t val, base;
2131	int8_t maxpwr;
2132	uint8_t flags;
2133	int i, j;
2134
2135	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2136
2137	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2138	base = le16toh(val);
2139	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2140	    enhinfo, sizeof enhinfo);
2141
2142	for (i = 0; i < nitems(enhinfo); i++) {
2143		flags = enhinfo[i].flags;
2144		if (!(flags & IWN_ENHINFO_VALID))
2145			continue;	/* Skip invalid entries. */
2146
2147		maxpwr = 0;
2148		if (sc->txchainmask & IWN_ANT_A)
2149			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2150		if (sc->txchainmask & IWN_ANT_B)
2151			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2152		if (sc->txchainmask & IWN_ANT_C)
2153			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2154		if (sc->ntxchains == 2)
2155			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2156		else if (sc->ntxchains == 3)
2157			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2158
2159		for (j = 0; j < ic->ic_nchans; j++) {
2160			c = &ic->ic_channels[j];
2161			if ((flags & IWN_ENHINFO_5GHZ)) {
2162				if (!IEEE80211_IS_CHAN_A(c))
2163					continue;
2164			} else if ((flags & IWN_ENHINFO_OFDM)) {
2165				if (!IEEE80211_IS_CHAN_G(c))
2166					continue;
2167			} else if (!IEEE80211_IS_CHAN_B(c))
2168				continue;
2169			if ((flags & IWN_ENHINFO_HT40)) {
2170				if (!IEEE80211_IS_CHAN_HT40(c))
2171					continue;
2172			} else {
2173				if (IEEE80211_IS_CHAN_HT40(c))
2174					continue;
2175			}
2176			if (enhinfo[i].chan != 0 &&
2177			    enhinfo[i].chan != c->ic_ieee)
2178				continue;
2179
2180			DPRINTF(sc, IWN_DEBUG_RESET,
2181			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2182			    c->ic_flags, maxpwr / 2);
2183			c->ic_maxregpower = maxpwr / 2;
2184			c->ic_maxpower = maxpwr;
2185		}
2186	}
2187
2188	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2189
2190}
2191
2192static struct ieee80211_node *
2193iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2194{
2195	return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2196}
2197
2198static __inline int
2199rate2plcp(int rate)
2200{
2201	switch (rate & 0xff) {
2202	case 12:	return 0xd;
2203	case 18:	return 0xf;
2204	case 24:	return 0x5;
2205	case 36:	return 0x7;
2206	case 48:	return 0x9;
2207	case 72:	return 0xb;
2208	case 96:	return 0x1;
2209	case 108:	return 0x3;
2210	case 2:		return 10;
2211	case 4:		return 20;
2212	case 11:	return 55;
2213	case 22:	return 110;
2214	}
2215	return 0;
2216}
2217
2218/*
2219 * Calculate the required PLCP value from the given rate,
2220 * to the given node.
2221 *
2222 * This will take the node configuration (eg 11n, rate table
2223 * setup, etc) into consideration.
2224 */
2225static uint32_t
2226iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2227    uint8_t rate)
2228{
2229#define	RV(v)	((v) & IEEE80211_RATE_VAL)
2230	struct ieee80211com *ic = ni->ni_ic;
2231	uint8_t txant1, txant2;
2232	uint32_t plcp = 0;
2233	int ridx;
2234
2235	/* Use the first valid TX antenna. */
2236	txant1 = IWN_LSB(sc->txchainmask);
2237	txant2 = IWN_LSB(sc->txchainmask & ~txant1);
2238
2239	/*
2240	 * If it's an MCS rate, let's set the plcp correctly
2241	 * and set the relevant flags based on the node config.
2242	 */
2243	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
2244		/*
2245		 * Set the initial PLCP value to be between 0->31 for
2246		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2247		 * flag.
2248		 */
2249		plcp = RV(rate) | IWN_RFLAG_MCS;
2250
2251		/*
2252		 * XXX the following should only occur if both
2253		 * the local configuration _and_ the remote node
2254		 * advertise these capabilities.  Thus this code
2255		 * may need fixing!
2256		 */
2257
2258		/*
2259		 * Set the channel width and guard interval.
2260		 */
2261		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2262			plcp |= IWN_RFLAG_HT40;
2263			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2264				plcp |= IWN_RFLAG_SGI;
2265		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2266			plcp |= IWN_RFLAG_SGI;
2267		}
2268
2269		/*
2270		 * If it's a two stream rate, enable TX on both
2271		 * antennas.
2272		 *
2273		 * XXX three stream rates?
2274		 */
2275		if (rate > 0x87)
2276			plcp |= IWN_RFLAG_ANT(txant1 | txant2);
2277		else
2278			plcp |= IWN_RFLAG_ANT(txant1);
2279	} else {
2280		/*
2281		 * Set the initial PLCP - fine for both
2282		 * OFDM and CCK rates.
2283		 */
2284		plcp = rate2plcp(rate);
2285
2286		/* Set CCK flag if it's CCK */
2287
2288		/* XXX It would be nice to have a method
2289		 * to map the ridx -> phy table entry
2290		 * so we could just query that, rather than
2291		 * this hack to check against IWN_RIDX_OFDM6.
2292		 */
2293		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2294		    rate & IEEE80211_RATE_VAL);
2295		if (ridx < IWN_RIDX_OFDM6 &&
2296		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2297			plcp |= IWN_RFLAG_CCK;
2298
2299		/* Set antenna configuration */
2300		plcp |= IWN_RFLAG_ANT(txant1);
2301	}
2302
2303	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2304	    __func__,
2305	    rate,
2306	    plcp);
2307
2308	return (htole32(plcp));
2309#undef	RV
2310}
2311
2312static void
2313iwn_newassoc(struct ieee80211_node *ni, int isnew)
2314{
2315	/* Doesn't do anything at the moment */
2316}
2317
2318static int
2319iwn_media_change(struct ifnet *ifp)
2320{
2321	int error;
2322
2323	error = ieee80211_media_change(ifp);
2324	/* NB: only the fixed rate can change and that doesn't need a reset */
2325	return (error == ENETRESET ? 0 : error);
2326}
2327
2328static int
2329iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2330{
2331	struct iwn_vap *ivp = IWN_VAP(vap);
2332	struct ieee80211com *ic = vap->iv_ic;
2333	struct iwn_softc *sc = ic->ic_ifp->if_softc;
2334	int error = 0;
2335
2336	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2337
2338	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2339	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2340
2341	IEEE80211_UNLOCK(ic);
2342	IWN_LOCK(sc);
2343	callout_stop(&sc->calib_to);
2344
2345	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2346
2347	switch (nstate) {
2348	case IEEE80211_S_ASSOC:
2349		if (vap->iv_state != IEEE80211_S_RUN)
2350			break;
2351		/* FALLTHROUGH */
2352	case IEEE80211_S_AUTH:
2353		if (vap->iv_state == IEEE80211_S_AUTH)
2354			break;
2355
2356		/*
2357		 * !AUTH -> AUTH transition requires state reset to handle
2358		 * reassociations correctly.
2359		 */
2360		sc->rxon->associd = 0;
2361		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2362		sc->calib.state = IWN_CALIB_STATE_INIT;
2363
2364		if ((error = iwn_auth(sc, vap)) != 0) {
2365			device_printf(sc->sc_dev,
2366			    "%s: could not move to auth state\n", __func__);
2367		}
2368		break;
2369
2370	case IEEE80211_S_RUN:
2371		/*
2372		 * RUN -> RUN transition; Just restart the timers.
2373		 */
2374		if (vap->iv_state == IEEE80211_S_RUN) {
2375			sc->calib_cnt = 0;
2376			break;
2377		}
2378
2379		/*
2380		 * !RUN -> RUN requires setting the association id
2381		 * which is done with a firmware cmd.  We also defer
2382		 * starting the timers until that work is done.
2383		 */
2384		if ((error = iwn_run(sc, vap)) != 0) {
2385			device_printf(sc->sc_dev,
2386			    "%s: could not move to run state\n", __func__);
2387		}
2388		break;
2389
2390	case IEEE80211_S_INIT:
2391		sc->calib.state = IWN_CALIB_STATE_INIT;
2392		break;
2393
2394	default:
2395		break;
2396	}
2397	IWN_UNLOCK(sc);
2398	IEEE80211_LOCK(ic);
2399	if (error != 0){
2400		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2401		return error;
2402	}
2403
2404	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2405
2406	return ivp->iv_newstate(vap, nstate, arg);
2407}
2408
2409static void
2410iwn_calib_timeout(void *arg)
2411{
2412	struct iwn_softc *sc = arg;
2413
2414	IWN_LOCK_ASSERT(sc);
2415
2416	/* Force automatic TX power calibration every 60 secs. */
2417	if (++sc->calib_cnt >= 120) {
2418		uint32_t flags = 0;
2419
2420		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2421		    "sending request for statistics");
2422		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2423		    sizeof flags, 1);
2424		sc->calib_cnt = 0;
2425	}
2426	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2427	    sc);
2428}
2429
2430/*
2431 * Process an RX_PHY firmware notification.  This is usually immediately
2432 * followed by an MPDU_RX_DONE notification.
2433 */
2434static void
2435iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2436    struct iwn_rx_data *data)
2437{
2438	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2439
2440	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2441	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2442
2443	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2444	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2445	sc->last_rx_valid = 1;
2446}
2447
2448/*
2449 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2450 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2451 */
2452static void
2453iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2454    struct iwn_rx_data *data)
2455{
2456	struct iwn_ops *ops = &sc->ops;
2457	struct ifnet *ifp = sc->sc_ifp;
2458	struct ieee80211com *ic = ifp->if_l2com;
2459	struct iwn_rx_ring *ring = &sc->rxq;
2460	struct ieee80211_frame *wh;
2461	struct ieee80211_node *ni;
2462	struct mbuf *m, *m1;
2463	struct iwn_rx_stat *stat;
2464	caddr_t head;
2465	bus_addr_t paddr;
2466	uint32_t flags;
2467	int error, len, rssi, nf;
2468
2469	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2470
2471	if (desc->type == IWN_MPDU_RX_DONE) {
2472		/* Check for prior RX_PHY notification. */
2473		if (!sc->last_rx_valid) {
2474			DPRINTF(sc, IWN_DEBUG_ANY,
2475			    "%s: missing RX_PHY\n", __func__);
2476			return;
2477		}
2478		stat = &sc->last_rx_stat;
2479	} else
2480		stat = (struct iwn_rx_stat *)(desc + 1);
2481
2482	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2483
2484	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2485		device_printf(sc->sc_dev,
2486		    "%s: invalid RX statistic header, len %d\n", __func__,
2487		    stat->cfg_phy_len);
2488		return;
2489	}
2490	if (desc->type == IWN_MPDU_RX_DONE) {
2491		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2492		head = (caddr_t)(mpdu + 1);
2493		len = le16toh(mpdu->len);
2494	} else {
2495		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2496		len = le16toh(stat->len);
2497	}
2498
2499	flags = le32toh(*(uint32_t *)(head + len));
2500
2501	/* Discard frames with a bad FCS early. */
2502	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2503		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2504		    __func__, flags);
2505		ifp->if_ierrors++;
2506		return;
2507	}
2508	/* Discard frames that are too short. */
2509	if (len < sizeof (*wh)) {
2510		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2511		    __func__, len);
2512		ifp->if_ierrors++;
2513		return;
2514	}
2515
2516	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2517	if (m1 == NULL) {
2518		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2519		    __func__);
2520		ifp->if_ierrors++;
2521		return;
2522	}
2523	bus_dmamap_unload(ring->data_dmat, data->map);
2524
2525	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2526	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2527	if (error != 0 && error != EFBIG) {
2528		device_printf(sc->sc_dev,
2529		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2530		m_freem(m1);
2531
2532		/* Try to reload the old mbuf. */
2533		error = bus_dmamap_load(ring->data_dmat, data->map,
2534		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2535		    &paddr, BUS_DMA_NOWAIT);
2536		if (error != 0 && error != EFBIG) {
2537			panic("%s: could not load old RX mbuf", __func__);
2538		}
2539		/* Physical address may have changed. */
2540		ring->desc[ring->cur] = htole32(paddr >> 8);
2541		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
2542		    BUS_DMASYNC_PREWRITE);
2543		ifp->if_ierrors++;
2544		return;
2545	}
2546
2547	m = data->m;
2548	data->m = m1;
2549	/* Update RX descriptor. */
2550	ring->desc[ring->cur] = htole32(paddr >> 8);
2551	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2552	    BUS_DMASYNC_PREWRITE);
2553
2554	/* Finalize mbuf. */
2555	m->m_pkthdr.rcvif = ifp;
2556	m->m_data = head;
2557	m->m_pkthdr.len = m->m_len = len;
2558
2559	/* Grab a reference to the source node. */
2560	wh = mtod(m, struct ieee80211_frame *);
2561	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2562	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2563	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2564
2565	rssi = ops->get_rssi(sc, stat);
2566
2567	if (ieee80211_radiotap_active(ic)) {
2568		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2569
2570		tap->wr_flags = 0;
2571		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2572			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2573		tap->wr_dbm_antsignal = (int8_t)rssi;
2574		tap->wr_dbm_antnoise = (int8_t)nf;
2575		tap->wr_tsft = stat->tstamp;
2576		switch (stat->rate) {
2577		/* CCK rates. */
2578		case  10: tap->wr_rate =   2; break;
2579		case  20: tap->wr_rate =   4; break;
2580		case  55: tap->wr_rate =  11; break;
2581		case 110: tap->wr_rate =  22; break;
2582		/* OFDM rates. */
2583		case 0xd: tap->wr_rate =  12; break;
2584		case 0xf: tap->wr_rate =  18; break;
2585		case 0x5: tap->wr_rate =  24; break;
2586		case 0x7: tap->wr_rate =  36; break;
2587		case 0x9: tap->wr_rate =  48; break;
2588		case 0xb: tap->wr_rate =  72; break;
2589		case 0x1: tap->wr_rate =  96; break;
2590		case 0x3: tap->wr_rate = 108; break;
2591		/* Unknown rate: should not happen. */
2592		default:  tap->wr_rate =   0;
2593		}
2594	}
2595
2596	IWN_UNLOCK(sc);
2597
2598	/* Send the frame to the 802.11 layer. */
2599	if (ni != NULL) {
2600		if (ni->ni_flags & IEEE80211_NODE_HT)
2601			m->m_flags |= M_AMPDU;
2602		(void)ieee80211_input(ni, m, rssi - nf, nf);
2603		/* Node is no longer needed. */
2604		ieee80211_free_node(ni);
2605	} else
2606		(void)ieee80211_input_all(ic, m, rssi - nf, nf);
2607
2608	IWN_LOCK(sc);
2609
2610	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2611
2612}
2613
2614/* Process an incoming Compressed BlockAck. */
2615static void
2616iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2617    struct iwn_rx_data *data)
2618{
2619	struct iwn_ops *ops = &sc->ops;
2620	struct ifnet *ifp = sc->sc_ifp;
2621	struct iwn_node *wn;
2622	struct ieee80211_node *ni;
2623	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2624	struct iwn_tx_ring *txq;
2625	struct iwn_tx_data *txdata;
2626	struct ieee80211_tx_ampdu *tap;
2627	struct mbuf *m;
2628	uint64_t bitmap;
2629	uint16_t ssn;
2630	uint8_t tid;
2631	int ackfailcnt = 0, i, lastidx, qid, *res, shift;
2632
2633	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2634
2635	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2636
2637	qid = le16toh(ba->qid);
2638	txq = &sc->txq[ba->qid];
2639	tap = sc->qid2tap[ba->qid];
2640	tid = tap->txa_tid;
2641	wn = (void *)tap->txa_ni;
2642
2643	res = NULL;
2644	ssn = 0;
2645	if (!IEEE80211_AMPDU_RUNNING(tap)) {
2646		res = tap->txa_private;
2647		ssn = tap->txa_start & 0xfff;
2648	}
2649
2650	for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
2651		txdata = &txq->data[txq->read];
2652
2653		/* Unmap and free mbuf. */
2654		bus_dmamap_sync(txq->data_dmat, txdata->map,
2655		    BUS_DMASYNC_POSTWRITE);
2656		bus_dmamap_unload(txq->data_dmat, txdata->map);
2657		m = txdata->m, txdata->m = NULL;
2658		ni = txdata->ni, txdata->ni = NULL;
2659
2660		KASSERT(ni != NULL, ("no node"));
2661		KASSERT(m != NULL, ("no mbuf"));
2662
2663		ieee80211_tx_complete(ni, m, 1);
2664
2665		txq->queued--;
2666		txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2667	}
2668
2669	if (txq->queued == 0 && res != NULL) {
2670		iwn_nic_lock(sc);
2671		ops->ampdu_tx_stop(sc, qid, tid, ssn);
2672		iwn_nic_unlock(sc);
2673		sc->qid2tap[qid] = NULL;
2674		free(res, M_DEVBUF);
2675		return;
2676	}
2677
2678	if (wn->agg[tid].bitmap == 0)
2679		return;
2680
2681	shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
2682	if (shift < 0)
2683		shift += 0x100;
2684
2685	if (wn->agg[tid].nframes > (64 - shift))
2686		return;
2687
2688	ni = tap->txa_ni;
2689	bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
2690	for (i = 0; bitmap; i++) {
2691		if ((bitmap & 1) == 0) {
2692			ifp->if_oerrors++;
2693			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2694			    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2695		} else {
2696			ifp->if_opackets++;
2697			ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2698			    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2699		}
2700		bitmap >>= 1;
2701	}
2702
2703	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2704
2705}
2706
2707/*
2708 * Process a CALIBRATION_RESULT notification sent by the initialization
2709 * firmware on response to a CMD_CALIB_CONFIG command (5000 only).
2710 */
2711static void
2712iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2713    struct iwn_rx_data *data)
2714{
2715	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2716	int len, idx = -1;
2717
2718	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2719
2720	/* Runtime firmware should not send such a notification. */
2721	if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
2722		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
2723	    __func__);
2724		return;
2725	}
2726	len = (le32toh(desc->len) & 0x3fff) - 4;
2727	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2728
2729	switch (calib->code) {
2730	case IWN5000_PHY_CALIB_DC:
2731		if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2732		    (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2733		     sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
2734		     sc->hw_type != IWN_HW_REV_TYPE_6050)
2735			idx = 0;
2736		break;
2737	case IWN5000_PHY_CALIB_LO:
2738		idx = 1;
2739		break;
2740	case IWN5000_PHY_CALIB_TX_IQ:
2741		idx = 2;
2742		break;
2743	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2744		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2745		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2746			idx = 3;
2747		break;
2748	case IWN5000_PHY_CALIB_BASE_BAND:
2749		idx = 4;
2750		break;
2751	}
2752	if (idx == -1)	/* Ignore other results. */
2753		return;
2754
2755	/* Save calibration result. */
2756	if (sc->calibcmd[idx].buf != NULL)
2757		free(sc->calibcmd[idx].buf, M_DEVBUF);
2758	sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2759	if (sc->calibcmd[idx].buf == NULL) {
2760		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2761		    "not enough memory for calibration result %d\n",
2762		    calib->code);
2763		return;
2764	}
2765	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2766	    "saving calibration result code=%d len=%d\n", calib->code, len);
2767	sc->calibcmd[idx].len = len;
2768	memcpy(sc->calibcmd[idx].buf, calib, len);
2769}
2770
2771/*
2772 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2773 * The latter is sent by the firmware after each received beacon.
2774 */
2775static void
2776iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2777    struct iwn_rx_data *data)
2778{
2779	struct iwn_ops *ops = &sc->ops;
2780	struct ifnet *ifp = sc->sc_ifp;
2781	struct ieee80211com *ic = ifp->if_l2com;
2782	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2783	struct iwn_calib_state *calib = &sc->calib;
2784	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2785	int temp;
2786
2787	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2788
2789	/* Ignore statistics received during a scan. */
2790	if (vap->iv_state != IEEE80211_S_RUN ||
2791	    (ic->ic_flags & IEEE80211_F_SCAN)){
2792		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
2793	    __func__);
2794		return;
2795	}
2796
2797	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2798
2799	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2800	    __func__, desc->type);
2801	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2802
2803	/* Test if temperature has changed. */
2804	if (stats->general.temp != sc->rawtemp) {
2805		/* Convert "raw" temperature to degC. */
2806		sc->rawtemp = stats->general.temp;
2807		temp = ops->get_temperature(sc);
2808		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2809		    __func__, temp);
2810
2811		/* Update TX power if need be (4965AGN only). */
2812		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2813			iwn4965_power_calibration(sc, temp);
2814	}
2815
2816	if (desc->type != IWN_BEACON_STATISTICS)
2817		return;	/* Reply to a statistics request. */
2818
2819	sc->noise = iwn_get_noise(&stats->rx.general);
2820	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2821
2822	/* Test that RSSI and noise are present in stats report. */
2823	if (le32toh(stats->rx.general.flags) != 1) {
2824		DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2825		    "received statistics without RSSI");
2826		return;
2827	}
2828
2829	if (calib->state == IWN_CALIB_STATE_ASSOC)
2830		iwn_collect_noise(sc, &stats->rx.general);
2831	else if (calib->state == IWN_CALIB_STATE_RUN)
2832		iwn_tune_sensitivity(sc, &stats->rx);
2833
2834	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2835}
2836
2837/*
2838 * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2839 * and 5000 adapters have different incompatible TX status formats.
2840 */
2841static void
2842iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2843    struct iwn_rx_data *data)
2844{
2845	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2846	struct iwn_tx_ring *ring;
2847	int qid;
2848
2849	qid = desc->qid & 0xf;
2850	ring = &sc->txq[qid];
2851
2852	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2853	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2854	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2855	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2856	    le32toh(stat->status));
2857
2858	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2859	if (qid >= sc->firstaggqueue) {
2860		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2861		    &stat->status);
2862	} else {
2863		iwn_tx_done(sc, desc, stat->ackfailcnt,
2864		    le32toh(stat->status) & 0xff);
2865	}
2866}
2867
2868static void
2869iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2870    struct iwn_rx_data *data)
2871{
2872	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2873	struct iwn_tx_ring *ring;
2874	int qid;
2875
2876	qid = desc->qid & 0xf;
2877	ring = &sc->txq[qid];
2878
2879	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2880	    "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2881	    __func__, desc->qid, desc->idx, stat->ackfailcnt,
2882	    stat->btkillcnt, stat->rate, le16toh(stat->duration),
2883	    le32toh(stat->status));
2884
2885#ifdef notyet
2886	/* Reset TX scheduler slot. */
2887	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2888#endif
2889
2890	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2891	if (qid >= sc->firstaggqueue) {
2892		iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2893		    &stat->status);
2894	} else {
2895		iwn_tx_done(sc, desc, stat->ackfailcnt,
2896		    le16toh(stat->status) & 0xff);
2897	}
2898}
2899
2900/*
2901 * Adapter-independent backend for TX_DONE firmware notifications.
2902 */
2903static void
2904iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2905    uint8_t status)
2906{
2907	struct ifnet *ifp = sc->sc_ifp;
2908	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2909	struct iwn_tx_data *data = &ring->data[desc->idx];
2910	struct mbuf *m;
2911	struct ieee80211_node *ni;
2912	struct ieee80211vap *vap;
2913
2914	KASSERT(data->ni != NULL, ("no node"));
2915
2916	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2917
2918	/* Unmap and free mbuf. */
2919	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2920	bus_dmamap_unload(ring->data_dmat, data->map);
2921	m = data->m, data->m = NULL;
2922	ni = data->ni, data->ni = NULL;
2923	vap = ni->ni_vap;
2924
2925	/*
2926	 * Update rate control statistics for the node.
2927	 */
2928	if (status & IWN_TX_FAIL) {
2929		ifp->if_oerrors++;
2930		ieee80211_ratectl_tx_complete(vap, ni,
2931		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2932	} else {
2933		ifp->if_opackets++;
2934		ieee80211_ratectl_tx_complete(vap, ni,
2935		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2936	}
2937
2938	/*
2939	 * Channels marked for "radar" require traffic to be received
2940	 * to unlock before we can transmit.  Until traffic is seen
2941	 * any attempt to transmit is returned immediately with status
2942	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
2943	 * happen on first authenticate after scanning.  To workaround
2944	 * this we ignore a failure of this sort in AUTH state so the
2945	 * 802.11 layer will fall back to using a timeout to wait for
2946	 * the AUTH reply.  This allows the firmware time to see
2947	 * traffic so a subsequent retry of AUTH succeeds.  It's
2948	 * unclear why the firmware does not maintain state for
2949	 * channels recently visited as this would allow immediate
2950	 * use of the channel after a scan (where we see traffic).
2951	 */
2952	if (status == IWN_TX_FAIL_TX_LOCKED &&
2953	    ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2954		ieee80211_tx_complete(ni, m, 0);
2955	else
2956		ieee80211_tx_complete(ni, m,
2957		    (status & IWN_TX_FAIL) != 0);
2958
2959	sc->sc_tx_timer = 0;
2960	if (--ring->queued < IWN_TX_RING_LOMARK) {
2961		sc->qfullmsk &= ~(1 << ring->qid);
2962		if (sc->qfullmsk == 0 &&
2963		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2964			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2965			iwn_start_locked(ifp);
2966		}
2967	}
2968
2969	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2970
2971}
2972
2973/*
2974 * Process a "command done" firmware notification.  This is where we wakeup
2975 * processes waiting for a synchronous command completion.
2976 */
2977static void
2978iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2979{
2980	struct iwn_tx_ring *ring = &sc->txq[4];
2981	struct iwn_tx_data *data;
2982
2983	if ((desc->qid & 0xf) != 4)
2984		return;	/* Not a command ack. */
2985
2986	data = &ring->data[desc->idx];
2987
2988	/* If the command was mapped in an mbuf, free it. */
2989	if (data->m != NULL) {
2990		bus_dmamap_sync(ring->data_dmat, data->map,
2991		    BUS_DMASYNC_POSTWRITE);
2992		bus_dmamap_unload(ring->data_dmat, data->map);
2993		m_freem(data->m);
2994		data->m = NULL;
2995	}
2996	wakeup(&ring->desc[desc->idx]);
2997}
2998
2999static void
3000iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3001    void *stat)
3002{
3003	struct iwn_ops *ops = &sc->ops;
3004	struct ifnet *ifp = sc->sc_ifp;
3005	struct iwn_tx_ring *ring = &sc->txq[qid];
3006	struct iwn_tx_data *data;
3007	struct mbuf *m;
3008	struct iwn_node *wn;
3009	struct ieee80211_node *ni;
3010	struct ieee80211_tx_ampdu *tap;
3011	uint64_t bitmap;
3012	uint32_t *status = stat;
3013	uint16_t *aggstatus = stat;
3014	uint16_t ssn;
3015	uint8_t tid;
3016	int bit, i, lastidx, *res, seqno, shift, start;
3017
3018	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3019
3020#ifdef NOT_YET
3021	if (nframes == 1) {
3022		if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
3023			printf("ieee80211_send_bar()\n");
3024	}
3025#endif
3026
3027	bitmap = 0;
3028	start = idx;
3029	for (i = 0; i < nframes; i++) {
3030		if (le16toh(aggstatus[i * 2]) & 0xc)
3031			continue;
3032
3033		idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3034		bit = idx - start;
3035		shift = 0;
3036		if (bit >= 64) {
3037			shift = 0x100 - idx + start;
3038			bit = 0;
3039			start = idx;
3040		} else if (bit <= -64)
3041			bit = 0x100 - start + idx;
3042		else if (bit < 0) {
3043			shift = start - idx;
3044			start = idx;
3045			bit = 0;
3046		}
3047		bitmap = bitmap << shift;
3048		bitmap |= 1ULL << bit;
3049	}
3050	tap = sc->qid2tap[qid];
3051	tid = tap->txa_tid;
3052	wn = (void *)tap->txa_ni;
3053	wn->agg[tid].bitmap = bitmap;
3054	wn->agg[tid].startidx = start;
3055	wn->agg[tid].nframes = nframes;
3056
3057	res = NULL;
3058	ssn = 0;
3059	if (!IEEE80211_AMPDU_RUNNING(tap)) {
3060		res = tap->txa_private;
3061		ssn = tap->txa_start & 0xfff;
3062	}
3063
3064	seqno = le32toh(*(status + nframes)) & 0xfff;
3065	for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3066		data = &ring->data[ring->read];
3067
3068		/* Unmap and free mbuf. */
3069		bus_dmamap_sync(ring->data_dmat, data->map,
3070		    BUS_DMASYNC_POSTWRITE);
3071		bus_dmamap_unload(ring->data_dmat, data->map);
3072		m = data->m, data->m = NULL;
3073		ni = data->ni, data->ni = NULL;
3074
3075		KASSERT(ni != NULL, ("no node"));
3076		KASSERT(m != NULL, ("no mbuf"));
3077
3078		ieee80211_tx_complete(ni, m, 1);
3079
3080		ring->queued--;
3081		ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3082	}
3083
3084	if (ring->queued == 0 && res != NULL) {
3085		iwn_nic_lock(sc);
3086		ops->ampdu_tx_stop(sc, qid, tid, ssn);
3087		iwn_nic_unlock(sc);
3088		sc->qid2tap[qid] = NULL;
3089		free(res, M_DEVBUF);
3090		return;
3091	}
3092
3093	sc->sc_tx_timer = 0;
3094	if (ring->queued < IWN_TX_RING_LOMARK) {
3095		sc->qfullmsk &= ~(1 << ring->qid);
3096		if (sc->qfullmsk == 0 &&
3097		    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
3098			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3099			iwn_start_locked(ifp);
3100		}
3101	}
3102
3103	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3104
3105}
3106
3107/*
3108 * Process an INT_FH_RX or INT_SW_RX interrupt.
3109 */
3110static void
3111iwn_notif_intr(struct iwn_softc *sc)
3112{
3113	struct iwn_ops *ops = &sc->ops;
3114	struct ifnet *ifp = sc->sc_ifp;
3115	struct ieee80211com *ic = ifp->if_l2com;
3116	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3117	uint16_t hw;
3118
3119	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3120	    BUS_DMASYNC_POSTREAD);
3121
3122	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3123	while (sc->rxq.cur != hw) {
3124		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3125		struct iwn_rx_desc *desc;
3126
3127		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3128		    BUS_DMASYNC_POSTREAD);
3129		desc = mtod(data->m, struct iwn_rx_desc *);
3130
3131		DPRINTF(sc, IWN_DEBUG_RECV,
3132		    "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
3133		    __func__, desc->qid & 0xf, desc->idx, desc->flags,
3134		    desc->type, iwn_intr_str(desc->type),
3135		    le16toh(desc->len));
3136
3137		if (!(desc->qid & 0x80))	/* Reply to a command. */
3138			iwn_cmd_done(sc, desc);
3139
3140		switch (desc->type) {
3141		case IWN_RX_PHY:
3142			iwn_rx_phy(sc, desc, data);
3143			break;
3144
3145		case IWN_RX_DONE:		/* 4965AGN only. */
3146		case IWN_MPDU_RX_DONE:
3147			/* An 802.11 frame has been received. */
3148			iwn_rx_done(sc, desc, data);
3149			break;
3150
3151		case IWN_RX_COMPRESSED_BA:
3152			/* A Compressed BlockAck has been received. */
3153			iwn_rx_compressed_ba(sc, desc, data);
3154			break;
3155
3156		case IWN_TX_DONE:
3157			/* An 802.11 frame has been transmitted. */
3158			ops->tx_done(sc, desc, data);
3159			break;
3160
3161		case IWN_RX_STATISTICS:
3162		case IWN_BEACON_STATISTICS:
3163			iwn_rx_statistics(sc, desc, data);
3164			break;
3165
3166		case IWN_BEACON_MISSED:
3167		{
3168			struct iwn_beacon_missed *miss =
3169			    (struct iwn_beacon_missed *)(desc + 1);
3170			int misses;
3171
3172			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3173			    BUS_DMASYNC_POSTREAD);
3174			misses = le32toh(miss->consecutive);
3175
3176			DPRINTF(sc, IWN_DEBUG_STATE,
3177			    "%s: beacons missed %d/%d\n", __func__,
3178			    misses, le32toh(miss->total));
3179			/*
3180			 * If more than 5 consecutive beacons are missed,
3181			 * reinitialize the sensitivity state machine.
3182			 */
3183			if (vap->iv_state == IEEE80211_S_RUN &&
3184			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3185				if (misses > 5)
3186					(void)iwn_init_sensitivity(sc);
3187				if (misses >= vap->iv_bmissthreshold) {
3188					IWN_UNLOCK(sc);
3189					ieee80211_beacon_miss(ic);
3190					IWN_LOCK(sc);
3191				}
3192			}
3193			break;
3194		}
3195		case IWN_UC_READY:
3196		{
3197			struct iwn_ucode_info *uc =
3198			    (struct iwn_ucode_info *)(desc + 1);
3199
3200			/* The microcontroller is ready. */
3201			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3202			    BUS_DMASYNC_POSTREAD);
3203			DPRINTF(sc, IWN_DEBUG_RESET,
3204			    "microcode alive notification version=%d.%d "
3205			    "subtype=%x alive=%x\n", uc->major, uc->minor,
3206			    uc->subtype, le32toh(uc->valid));
3207
3208			if (le32toh(uc->valid) != 1) {
3209				device_printf(sc->sc_dev,
3210				    "microcontroller initialization failed");
3211				break;
3212			}
3213			if (uc->subtype == IWN_UCODE_INIT) {
3214				/* Save microcontroller report. */
3215				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3216			}
3217			/* Save the address of the error log in SRAM. */
3218			sc->errptr = le32toh(uc->errptr);
3219			break;
3220		}
3221		case IWN_STATE_CHANGED:
3222		{
3223			/*
3224			 * State change allows hardware switch change to be
3225			 * noted. However, we handle this in iwn_intr as we
3226			 * get both the enable/disble intr.
3227			 */
3228			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3229			    BUS_DMASYNC_POSTREAD);
3230#ifdef	IWN_DEBUG
3231			uint32_t *status = (uint32_t *)(desc + 1);
3232			DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
3233			    le32toh(*status));
3234#endif
3235			break;
3236		}
3237		case IWN_START_SCAN:
3238		{
3239			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3240			    BUS_DMASYNC_POSTREAD);
3241#ifdef	IWN_DEBUG
3242			struct iwn_start_scan *scan =
3243			    (struct iwn_start_scan *)(desc + 1);
3244			DPRINTF(sc, IWN_DEBUG_ANY,
3245			    "%s: scanning channel %d status %x\n",
3246			    __func__, scan->chan, le32toh(scan->status));
3247#endif
3248			break;
3249		}
3250		case IWN_STOP_SCAN:
3251		{
3252			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3253			    BUS_DMASYNC_POSTREAD);
3254#ifdef	IWN_DEBUG
3255			struct iwn_stop_scan *scan =
3256			    (struct iwn_stop_scan *)(desc + 1);
3257			DPRINTF(sc, IWN_DEBUG_STATE,
3258			    "scan finished nchan=%d status=%d chan=%d\n",
3259			    scan->nchan, scan->status, scan->chan);
3260#endif
3261
3262			IWN_UNLOCK(sc);
3263			ieee80211_scan_next(vap);
3264			IWN_LOCK(sc);
3265			break;
3266		}
3267		case IWN5000_CALIBRATION_RESULT:
3268			iwn5000_rx_calib_results(sc, desc, data);
3269			break;
3270
3271		case IWN5000_CALIBRATION_DONE:
3272			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3273			wakeup(sc);
3274			break;
3275		}
3276
3277		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3278	}
3279
3280	/* Tell the firmware what we have processed. */
3281	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3282	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3283}
3284
3285/*
3286 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3287 * from power-down sleep mode.
3288 */
3289static void
3290iwn_wakeup_intr(struct iwn_softc *sc)
3291{
3292	int qid;
3293
3294	DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3295	    __func__);
3296
3297	/* Wakeup RX and TX rings. */
3298	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3299	for (qid = 0; qid < sc->ntxqs; qid++) {
3300		struct iwn_tx_ring *ring = &sc->txq[qid];
3301		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3302	}
3303}
3304
3305static void
3306iwn_rftoggle_intr(struct iwn_softc *sc)
3307{
3308	struct ifnet *ifp = sc->sc_ifp;
3309	struct ieee80211com *ic = ifp->if_l2com;
3310	uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3311
3312	IWN_LOCK_ASSERT(sc);
3313
3314	device_printf(sc->sc_dev, "RF switch: radio %s\n",
3315	    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3316	if (tmp & IWN_GP_CNTRL_RFKILL)
3317		ieee80211_runtask(ic, &sc->sc_radioon_task);
3318	else
3319		ieee80211_runtask(ic, &sc->sc_radiooff_task);
3320}
3321
3322/*
3323 * Dump the error log of the firmware when a firmware panic occurs.  Although
3324 * we can't debug the firmware because it is neither open source nor free, it
3325 * can help us to identify certain classes of problems.
3326 */
3327static void
3328iwn_fatal_intr(struct iwn_softc *sc)
3329{
3330	struct iwn_fw_dump dump;
3331	int i;
3332
3333	IWN_LOCK_ASSERT(sc);
3334
3335	/* Force a complete recalibration on next init. */
3336	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3337
3338	/* Check that the error log address is valid. */
3339	if (sc->errptr < IWN_FW_DATA_BASE ||
3340	    sc->errptr + sizeof (dump) >
3341	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3342		printf("%s: bad firmware error log address 0x%08x\n", __func__,
3343		    sc->errptr);
3344		return;
3345	}
3346	if (iwn_nic_lock(sc) != 0) {
3347		printf("%s: could not read firmware error log\n", __func__);
3348		return;
3349	}
3350	/* Read firmware error log from SRAM. */
3351	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3352	    sizeof (dump) / sizeof (uint32_t));
3353	iwn_nic_unlock(sc);
3354
3355	if (dump.valid == 0) {
3356		printf("%s: firmware error log is empty\n", __func__);
3357		return;
3358	}
3359	printf("firmware error log:\n");
3360	printf("  error type      = \"%s\" (0x%08X)\n",
3361	    (dump.id < nitems(iwn_fw_errmsg)) ?
3362		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3363	    dump.id);
3364	printf("  program counter = 0x%08X\n", dump.pc);
3365	printf("  source line     = 0x%08X\n", dump.src_line);
3366	printf("  error data      = 0x%08X%08X\n",
3367	    dump.error_data[0], dump.error_data[1]);
3368	printf("  branch link     = 0x%08X%08X\n",
3369	    dump.branch_link[0], dump.branch_link[1]);
3370	printf("  interrupt link  = 0x%08X%08X\n",
3371	    dump.interrupt_link[0], dump.interrupt_link[1]);
3372	printf("  time            = %u\n", dump.time[0]);
3373
3374	/* Dump driver status (TX and RX rings) while we're here. */
3375	printf("driver status:\n");
3376	for (i = 0; i < sc->ntxqs; i++) {
3377		struct iwn_tx_ring *ring = &sc->txq[i];
3378		printf("  tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3379		    i, ring->qid, ring->cur, ring->queued);
3380	}
3381	printf("  rx ring: cur=%d\n", sc->rxq.cur);
3382}
3383
3384static void
3385iwn_intr(void *arg)
3386{
3387	struct iwn_softc *sc = arg;
3388	struct ifnet *ifp = sc->sc_ifp;
3389	uint32_t r1, r2, tmp;
3390
3391	IWN_LOCK(sc);
3392
3393	/* Disable interrupts. */
3394	IWN_WRITE(sc, IWN_INT_MASK, 0);
3395
3396	/* Read interrupts from ICT (fast) or from registers (slow). */
3397	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3398		tmp = 0;
3399		while (sc->ict[sc->ict_cur] != 0) {
3400			tmp |= sc->ict[sc->ict_cur];
3401			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3402			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3403		}
3404		tmp = le32toh(tmp);
3405		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3406			tmp = 0;
3407		else if (tmp & 0xc0000)	/* Workaround a HW bug. */
3408			tmp |= 0x8000;
3409		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3410		r2 = 0;	/* Unused. */
3411	} else {
3412		r1 = IWN_READ(sc, IWN_INT);
3413		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3414			return;	/* Hardware gone! */
3415		r2 = IWN_READ(sc, IWN_FH_INT);
3416	}
3417
3418	DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
3419    , r1, r2);
3420
3421	if (r1 == 0 && r2 == 0)
3422		goto done;	/* Interrupt not for us. */
3423
3424	/* Acknowledge interrupts. */
3425	IWN_WRITE(sc, IWN_INT, r1);
3426	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3427		IWN_WRITE(sc, IWN_FH_INT, r2);
3428
3429	if (r1 & IWN_INT_RF_TOGGLED) {
3430		iwn_rftoggle_intr(sc);
3431		goto done;
3432	}
3433	if (r1 & IWN_INT_CT_REACHED) {
3434		device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
3435		    __func__);
3436	}
3437	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3438		device_printf(sc->sc_dev, "%s: fatal firmware error\n",
3439		    __func__);
3440#ifdef	IWN_DEBUG
3441		iwn_debug_register(sc);
3442#endif
3443		/* Dump firmware error log and stop. */
3444		iwn_fatal_intr(sc);
3445		ifp->if_flags &= ~IFF_UP;
3446		iwn_stop_locked(sc);
3447		goto done;
3448	}
3449	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3450	    (r2 & IWN_FH_INT_RX)) {
3451		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3452			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3453				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3454			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3455			    IWN_INT_PERIODIC_DIS);
3456			iwn_notif_intr(sc);
3457			if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3458				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3459				    IWN_INT_PERIODIC_ENA);
3460			}
3461		} else
3462			iwn_notif_intr(sc);
3463	}
3464
3465	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3466		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3467			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3468		wakeup(sc);	/* FH DMA transfer completed. */
3469	}
3470
3471	if (r1 & IWN_INT_ALIVE)
3472		wakeup(sc);	/* Firmware is alive. */
3473
3474	if (r1 & IWN_INT_WAKEUP)
3475		iwn_wakeup_intr(sc);
3476
3477done:
3478	/* Re-enable interrupts. */
3479	if (ifp->if_flags & IFF_UP)
3480		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3481
3482	IWN_UNLOCK(sc);
3483}
3484
3485/*
3486 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3487 * 5000 adapters use a slightly different format).
3488 */
3489static void
3490iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3491    uint16_t len)
3492{
3493	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3494
3495	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3496
3497	*w = htole16(len + 8);
3498	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3499	    BUS_DMASYNC_PREWRITE);
3500	if (idx < IWN_SCHED_WINSZ) {
3501		*(w + IWN_TX_RING_COUNT) = *w;
3502		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3503		    BUS_DMASYNC_PREWRITE);
3504	}
3505}
3506
3507static void
3508iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3509    uint16_t len)
3510{
3511	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3512
3513	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3514
3515	*w = htole16(id << 12 | (len + 8));
3516	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3517	    BUS_DMASYNC_PREWRITE);
3518	if (idx < IWN_SCHED_WINSZ) {
3519		*(w + IWN_TX_RING_COUNT) = *w;
3520		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3521		    BUS_DMASYNC_PREWRITE);
3522	}
3523}
3524
3525#ifdef notyet
3526static void
3527iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3528{
3529	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3530
3531	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
3532
3533	*w = (*w & htole16(0xf000)) | htole16(1);
3534	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3535	    BUS_DMASYNC_PREWRITE);
3536	if (idx < IWN_SCHED_WINSZ) {
3537		*(w + IWN_TX_RING_COUNT) = *w;
3538		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3539		    BUS_DMASYNC_PREWRITE);
3540	}
3541}
3542#endif
3543
3544static int
3545iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3546{
3547	struct iwn_ops *ops = &sc->ops;
3548	const struct ieee80211_txparam *tp;
3549	struct ieee80211vap *vap = ni->ni_vap;
3550	struct ieee80211com *ic = ni->ni_ic;
3551	struct iwn_node *wn = (void *)ni;
3552	struct iwn_tx_ring *ring;
3553	struct iwn_tx_desc *desc;
3554	struct iwn_tx_data *data;
3555	struct iwn_tx_cmd *cmd;
3556	struct iwn_cmd_data *tx;
3557	struct ieee80211_frame *wh;
3558	struct ieee80211_key *k = NULL;
3559	struct mbuf *m1;
3560	uint32_t flags;
3561	uint16_t qos;
3562	u_int hdrlen;
3563	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3564	uint8_t tid, ridx, txant, type;
3565	int ac, i, totlen, error, pad, nsegs = 0, rate;
3566
3567	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3568
3569	IWN_LOCK_ASSERT(sc);
3570
3571	wh = mtod(m, struct ieee80211_frame *);
3572	hdrlen = ieee80211_anyhdrsize(wh);
3573	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3574
3575	/* Select EDCA Access Category and TX ring for this frame. */
3576	if (IEEE80211_QOS_HAS_SEQ(wh)) {
3577		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3578		tid = qos & IEEE80211_QOS_TID;
3579	} else {
3580		qos = 0;
3581		tid = 0;
3582	}
3583	ac = M_WME_GETAC(m);
3584	if (m->m_flags & M_AMPDU_MPDU) {
3585		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
3586
3587		if (!IEEE80211_AMPDU_RUNNING(tap)) {
3588			m_freem(m);
3589			return EINVAL;
3590		}
3591
3592		ac = *(int *)tap->txa_private;
3593		*(uint16_t *)wh->i_seq =
3594		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
3595		ni->ni_txseqs[tid]++;
3596	}
3597	ring = &sc->txq[ac];
3598	desc = &ring->desc[ring->cur];
3599	data = &ring->data[ring->cur];
3600
3601	/* Choose a TX rate index. */
3602	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3603	if (type == IEEE80211_FC0_TYPE_MGT)
3604		rate = tp->mgmtrate;
3605	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3606		rate = tp->mcastrate;
3607	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3608		rate = tp->ucastrate;
3609	else {
3610		/* XXX pass pktlen */
3611		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3612		rate = ni->ni_txrate;
3613	}
3614	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3615	    rate & IEEE80211_RATE_VAL);
3616
3617	/* Encrypt the frame if need be. */
3618	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3619		/* Retrieve key for TX. */
3620		k = ieee80211_crypto_encap(ni, m);
3621		if (k == NULL) {
3622			m_freem(m);
3623			return ENOBUFS;
3624		}
3625		/* 802.11 header may have moved. */
3626		wh = mtod(m, struct ieee80211_frame *);
3627	}
3628	totlen = m->m_pkthdr.len;
3629
3630	if (ieee80211_radiotap_active_vap(vap)) {
3631		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3632
3633		tap->wt_flags = 0;
3634		tap->wt_rate = rate;
3635		if (k != NULL)
3636			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3637
3638		ieee80211_radiotap_tx(vap, m);
3639	}
3640
3641	/* Prepare TX firmware command. */
3642	cmd = &ring->cmd[ring->cur];
3643	cmd->code = IWN_CMD_TX_DATA;
3644	cmd->flags = 0;
3645	cmd->qid = ring->qid;
3646	cmd->idx = ring->cur;
3647
3648	tx = (struct iwn_cmd_data *)cmd->data;
3649	/* NB: No need to clear tx, all fields are reinitialized here. */
3650	tx->scratch = 0;	/* clear "scratch" area */
3651
3652	flags = 0;
3653	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3654		/* Unicast frame, check if an ACK is expected. */
3655		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3656		    IEEE80211_QOS_ACKPOLICY_NOACK)
3657			flags |= IWN_TX_NEED_ACK;
3658	}
3659	if ((wh->i_fc[0] &
3660	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3661	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3662		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3663
3664	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3665		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3666
3667	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3668	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3669		/* NB: Group frames are sent using CCK in 802.11b/g. */
3670		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3671			flags |= IWN_TX_NEED_RTS;
3672		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3673		    ridx >= IWN_RIDX_OFDM6) {
3674			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3675				flags |= IWN_TX_NEED_CTS;
3676			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3677				flags |= IWN_TX_NEED_RTS;
3678		}
3679		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3680			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3681				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3682				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3683				flags |= IWN_TX_NEED_PROTECTION;
3684			} else
3685				flags |= IWN_TX_FULL_TXOP;
3686		}
3687	}
3688
3689	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3690	    type != IEEE80211_FC0_TYPE_DATA)
3691		tx->id = sc->broadcast_id;
3692	else
3693		tx->id = wn->id;
3694
3695	if (type == IEEE80211_FC0_TYPE_MGT) {
3696		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3697
3698		/* Tell HW to set timestamp in probe responses. */
3699		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3700			flags |= IWN_TX_INSERT_TSTAMP;
3701		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3702		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3703			tx->timeout = htole16(3);
3704		else
3705			tx->timeout = htole16(2);
3706	} else
3707		tx->timeout = htole16(0);
3708
3709	if (hdrlen & 3) {
3710		/* First segment length must be a multiple of 4. */
3711		flags |= IWN_TX_NEED_PADDING;
3712		pad = 4 - (hdrlen & 3);
3713	} else
3714		pad = 0;
3715
3716	tx->len = htole16(totlen);
3717	tx->tid = tid;
3718	tx->rts_ntries = 60;
3719	tx->data_ntries = 15;
3720	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3721	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
3722	if (tx->id == sc->broadcast_id) {
3723		/* Group or management frame. */
3724		tx->linkq = 0;
3725		/* XXX Alternate between antenna A and B? */
3726		txant = IWN_LSB(sc->txchainmask);
3727		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3728	} else {
3729		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3730		flags |= IWN_TX_LINKQ;	/* enable MRR */
3731	}
3732	/* Set physical address of "scratch area". */
3733	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3734	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3735
3736	/* Copy 802.11 header in TX command. */
3737	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3738
3739	/* Trim 802.11 header. */
3740	m_adj(m, hdrlen);
3741	tx->security = 0;
3742	tx->flags = htole32(flags);
3743
3744	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3745	    &nsegs, BUS_DMA_NOWAIT);
3746	if (error != 0) {
3747		if (error != EFBIG) {
3748			device_printf(sc->sc_dev,
3749			    "%s: can't map mbuf (error %d)\n", __func__, error);
3750			m_freem(m);
3751			return error;
3752		}
3753		/* Too many DMA segments, linearize mbuf. */
3754		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3755		if (m1 == NULL) {
3756			device_printf(sc->sc_dev,
3757			    "%s: could not defrag mbuf\n", __func__);
3758			m_freem(m);
3759			return ENOBUFS;
3760		}
3761		m = m1;
3762
3763		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3764		    segs, &nsegs, BUS_DMA_NOWAIT);
3765		if (error != 0) {
3766			device_printf(sc->sc_dev,
3767			    "%s: can't map mbuf (error %d)\n", __func__, error);
3768			m_freem(m);
3769			return error;
3770		}
3771	}
3772
3773	data->m = m;
3774	data->ni = ni;
3775
3776	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3777	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3778
3779	/* Fill TX descriptor. */
3780	desc->nsegs = 1;
3781	if (m->m_len != 0)
3782		desc->nsegs += nsegs;
3783	/* First DMA segment is used by the TX command. */
3784	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3785	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3786	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3787	/* Other DMA segments are for data payload. */
3788	seg = &segs[0];
3789	for (i = 1; i <= nsegs; i++) {
3790		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3791		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
3792		    seg->ds_len << 4);
3793		seg++;
3794	}
3795
3796	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3797	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3798	    BUS_DMASYNC_PREWRITE);
3799	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3800	    BUS_DMASYNC_PREWRITE);
3801
3802	/* Update TX scheduler. */
3803	if (ring->qid >= sc->firstaggqueue)
3804		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3805
3806	/* Kick TX ring. */
3807	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3808	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3809
3810	/* Mark TX ring as full if we reach a certain threshold. */
3811	if (++ring->queued > IWN_TX_RING_HIMARK)
3812		sc->qfullmsk |= 1 << ring->qid;
3813
3814	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3815
3816	return 0;
3817}
3818
3819static int
3820iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3821    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3822{
3823	struct iwn_ops *ops = &sc->ops;
3824	struct ifnet *ifp = sc->sc_ifp;
3825	struct ieee80211vap *vap = ni->ni_vap;
3826	struct ieee80211com *ic = ifp->if_l2com;
3827	struct iwn_tx_cmd *cmd;
3828	struct iwn_cmd_data *tx;
3829	struct ieee80211_frame *wh;
3830	struct iwn_tx_ring *ring;
3831	struct iwn_tx_desc *desc;
3832	struct iwn_tx_data *data;
3833	struct mbuf *m1;
3834	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3835	uint32_t flags;
3836	u_int hdrlen;
3837	int ac, totlen, error, pad, nsegs = 0, i, rate;
3838	uint8_t ridx, type, txant;
3839
3840	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3841
3842	IWN_LOCK_ASSERT(sc);
3843
3844	wh = mtod(m, struct ieee80211_frame *);
3845	hdrlen = ieee80211_anyhdrsize(wh);
3846	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3847
3848	ac = params->ibp_pri & 3;
3849
3850	ring = &sc->txq[ac];
3851	desc = &ring->desc[ring->cur];
3852	data = &ring->data[ring->cur];
3853
3854	/* Choose a TX rate index. */
3855	rate = params->ibp_rate0;
3856	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3857	    rate & IEEE80211_RATE_VAL);
3858	if (ridx == (uint8_t)-1) {
3859		/* XXX fall back to mcast/mgmt rate? */
3860		m_freem(m);
3861		return EINVAL;
3862	}
3863
3864	totlen = m->m_pkthdr.len;
3865
3866	/* Prepare TX firmware command. */
3867	cmd = &ring->cmd[ring->cur];
3868	cmd->code = IWN_CMD_TX_DATA;
3869	cmd->flags = 0;
3870	cmd->qid = ring->qid;
3871	cmd->idx = ring->cur;
3872
3873	tx = (struct iwn_cmd_data *)cmd->data;
3874	/* NB: No need to clear tx, all fields are reinitialized here. */
3875	tx->scratch = 0;	/* clear "scratch" area */
3876
3877	flags = 0;
3878	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3879		flags |= IWN_TX_NEED_ACK;
3880	if (params->ibp_flags & IEEE80211_BPF_RTS) {
3881		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3882			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3883			flags &= ~IWN_TX_NEED_RTS;
3884			flags |= IWN_TX_NEED_PROTECTION;
3885		} else
3886			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3887	}
3888	if (params->ibp_flags & IEEE80211_BPF_CTS) {
3889		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3890			/* 5000 autoselects RTS/CTS or CTS-to-self. */
3891			flags &= ~IWN_TX_NEED_CTS;
3892			flags |= IWN_TX_NEED_PROTECTION;
3893		} else
3894			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3895	}
3896	if (type == IEEE80211_FC0_TYPE_MGT) {
3897		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3898
3899		/* Tell HW to set timestamp in probe responses. */
3900		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3901			flags |= IWN_TX_INSERT_TSTAMP;
3902
3903		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3904		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3905			tx->timeout = htole16(3);
3906		else
3907			tx->timeout = htole16(2);
3908	} else
3909		tx->timeout = htole16(0);
3910
3911	if (hdrlen & 3) {
3912		/* First segment length must be a multiple of 4. */
3913		flags |= IWN_TX_NEED_PADDING;
3914		pad = 4 - (hdrlen & 3);
3915	} else
3916		pad = 0;
3917
3918	if (ieee80211_radiotap_active_vap(vap)) {
3919		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3920
3921		tap->wt_flags = 0;
3922		tap->wt_rate = rate;
3923
3924		ieee80211_radiotap_tx(vap, m);
3925	}
3926
3927	tx->len = htole16(totlen);
3928	tx->tid = 0;
3929	tx->id = sc->broadcast_id;
3930	tx->rts_ntries = params->ibp_try1;
3931	tx->data_ntries = params->ibp_try0;
3932	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3933
3934	/* XXX should just use  iwn_rate_to_plcp() */
3935	tx->rate = htole32(rate2plcp(rate));
3936	if (ridx < IWN_RIDX_OFDM6 &&
3937	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
3938		tx->rate |= htole32(IWN_RFLAG_CCK);
3939
3940	/* Group or management frame. */
3941	tx->linkq = 0;
3942	txant = IWN_LSB(sc->txchainmask);
3943	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3944
3945	/* Set physical address of "scratch area". */
3946	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3947	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3948
3949	/* Copy 802.11 header in TX command. */
3950	memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3951
3952	/* Trim 802.11 header. */
3953	m_adj(m, hdrlen);
3954	tx->security = 0;
3955	tx->flags = htole32(flags);
3956
3957	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3958	    &nsegs, BUS_DMA_NOWAIT);
3959	if (error != 0) {
3960		if (error != EFBIG) {
3961			device_printf(sc->sc_dev,
3962			    "%s: can't map mbuf (error %d)\n", __func__, error);
3963			m_freem(m);
3964			return error;
3965		}
3966		/* Too many DMA segments, linearize mbuf. */
3967		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3968		if (m1 == NULL) {
3969			device_printf(sc->sc_dev,
3970			    "%s: could not defrag mbuf\n", __func__);
3971			m_freem(m);
3972			return ENOBUFS;
3973		}
3974		m = m1;
3975
3976		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3977		    segs, &nsegs, BUS_DMA_NOWAIT);
3978		if (error != 0) {
3979			device_printf(sc->sc_dev,
3980			    "%s: can't map mbuf (error %d)\n", __func__, error);
3981			m_freem(m);
3982			return error;
3983		}
3984	}
3985
3986	data->m = m;
3987	data->ni = ni;
3988
3989	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3990	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3991
3992	/* Fill TX descriptor. */
3993	desc->nsegs = 1;
3994	if (m->m_len != 0)
3995		desc->nsegs += nsegs;
3996	/* First DMA segment is used by the TX command. */
3997	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3998	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3999	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
4000	/* Other DMA segments are for data payload. */
4001	seg = &segs[0];
4002	for (i = 1; i <= nsegs; i++) {
4003		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4004		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
4005		    seg->ds_len << 4);
4006		seg++;
4007	}
4008
4009	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4010	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4011	    BUS_DMASYNC_PREWRITE);
4012	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4013	    BUS_DMASYNC_PREWRITE);
4014
4015	/* Update TX scheduler. */
4016	if (ring->qid >= sc->firstaggqueue)
4017		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4018
4019	/* Kick TX ring. */
4020	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4021	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4022
4023	/* Mark TX ring as full if we reach a certain threshold. */
4024	if (++ring->queued > IWN_TX_RING_HIMARK)
4025		sc->qfullmsk |= 1 << ring->qid;
4026
4027	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4028
4029	return 0;
4030}
4031
4032static int
4033iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4034    const struct ieee80211_bpf_params *params)
4035{
4036	struct ieee80211com *ic = ni->ni_ic;
4037	struct ifnet *ifp = ic->ic_ifp;
4038	struct iwn_softc *sc = ifp->if_softc;
4039	int error = 0;
4040
4041	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4042
4043	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4044		ieee80211_free_node(ni);
4045		m_freem(m);
4046		return ENETDOWN;
4047	}
4048
4049	IWN_LOCK(sc);
4050	if (params == NULL) {
4051		/*
4052		 * Legacy path; interpret frame contents to decide
4053		 * precisely how to send the frame.
4054		 */
4055		error = iwn_tx_data(sc, m, ni);
4056	} else {
4057		/*
4058		 * Caller supplied explicit parameters to use in
4059		 * sending the frame.
4060		 */
4061		error = iwn_tx_data_raw(sc, m, ni, params);
4062	}
4063	if (error != 0) {
4064		/* NB: m is reclaimed on tx failure */
4065		ieee80211_free_node(ni);
4066		ifp->if_oerrors++;
4067	}
4068	sc->sc_tx_timer = 5;
4069
4070	IWN_UNLOCK(sc);
4071
4072	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4073
4074	return error;
4075}
4076
4077static void
4078iwn_start(struct ifnet *ifp)
4079{
4080	struct iwn_softc *sc = ifp->if_softc;
4081
4082	IWN_LOCK(sc);
4083	iwn_start_locked(ifp);
4084	IWN_UNLOCK(sc);
4085}
4086
4087static void
4088iwn_start_locked(struct ifnet *ifp)
4089{
4090	struct iwn_softc *sc = ifp->if_softc;
4091	struct ieee80211_node *ni;
4092	struct mbuf *m;
4093
4094	IWN_LOCK_ASSERT(sc);
4095
4096	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
4097	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
4098		return;
4099
4100	for (;;) {
4101		if (sc->qfullmsk != 0) {
4102			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4103			break;
4104		}
4105		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
4106		if (m == NULL)
4107			break;
4108		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4109		if (iwn_tx_data(sc, m, ni) != 0) {
4110			ieee80211_free_node(ni);
4111			ifp->if_oerrors++;
4112			continue;
4113		}
4114		sc->sc_tx_timer = 5;
4115	}
4116}
4117
4118static void
4119iwn_watchdog(void *arg)
4120{
4121	struct iwn_softc *sc = arg;
4122	struct ifnet *ifp = sc->sc_ifp;
4123	struct ieee80211com *ic = ifp->if_l2com;
4124
4125	IWN_LOCK_ASSERT(sc);
4126
4127	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
4128
4129	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4130
4131	if (sc->sc_tx_timer > 0) {
4132		if (--sc->sc_tx_timer == 0) {
4133			if_printf(ifp, "device timeout\n");
4134			ieee80211_runtask(ic, &sc->sc_reinit_task);
4135			return;
4136		}
4137	}
4138	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
4139}
4140
4141static int
4142iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4143{
4144	struct iwn_softc *sc = ifp->if_softc;
4145	struct ieee80211com *ic = ifp->if_l2com;
4146	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4147	struct ifreq *ifr = (struct ifreq *) data;
4148	int error = 0, startall = 0, stop = 0;
4149
4150	switch (cmd) {
4151	case SIOCGIFADDR:
4152		error = ether_ioctl(ifp, cmd, data);
4153		break;
4154	case SIOCSIFFLAGS:
4155		IWN_LOCK(sc);
4156		if (ifp->if_flags & IFF_UP) {
4157			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4158				iwn_init_locked(sc);
4159				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
4160					startall = 1;
4161				else
4162					stop = 1;
4163			}
4164		} else {
4165			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4166				iwn_stop_locked(sc);
4167		}
4168		IWN_UNLOCK(sc);
4169		if (startall)
4170			ieee80211_start_all(ic);
4171		else if (vap != NULL && stop)
4172			ieee80211_stop(vap);
4173		break;
4174	case SIOCGIFMEDIA:
4175		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4176		break;
4177	default:
4178		error = EINVAL;
4179		break;
4180	}
4181	return error;
4182}
4183
4184/*
4185 * Send a command to the firmware.
4186 */
4187static int
4188iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
4189{
4190	struct iwn_tx_ring *ring = &sc->txq[4];
4191	struct iwn_tx_desc *desc;
4192	struct iwn_tx_data *data;
4193	struct iwn_tx_cmd *cmd;
4194	struct mbuf *m;
4195	bus_addr_t paddr;
4196	int totlen, error;
4197
4198	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4199
4200	if (async == 0)
4201		IWN_LOCK_ASSERT(sc);
4202
4203	desc = &ring->desc[ring->cur];
4204	data = &ring->data[ring->cur];
4205	totlen = 4 + size;
4206
4207	if (size > sizeof cmd->data) {
4208		/* Command is too large to fit in a descriptor. */
4209		if (totlen > MCLBYTES)
4210			return EINVAL;
4211		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
4212		if (m == NULL)
4213			return ENOMEM;
4214		cmd = mtod(m, struct iwn_tx_cmd *);
4215		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
4216		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
4217		if (error != 0) {
4218			m_freem(m);
4219			return error;
4220		}
4221		data->m = m;
4222	} else {
4223		cmd = &ring->cmd[ring->cur];
4224		paddr = data->cmd_paddr;
4225	}
4226
4227	cmd->code = code;
4228	cmd->flags = 0;
4229	cmd->qid = ring->qid;
4230	cmd->idx = ring->cur;
4231	memcpy(cmd->data, buf, size);
4232
4233	desc->nsegs = 1;
4234	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4235	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
4236
4237	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
4238	    __func__, iwn_intr_str(cmd->code), cmd->code,
4239	    cmd->flags, cmd->qid, cmd->idx);
4240
4241	if (size > sizeof cmd->data) {
4242		bus_dmamap_sync(ring->data_dmat, data->map,
4243		    BUS_DMASYNC_PREWRITE);
4244	} else {
4245		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4246		    BUS_DMASYNC_PREWRITE);
4247	}
4248	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4249	    BUS_DMASYNC_PREWRITE);
4250
4251	/* Kick command ring. */
4252	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4253	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4254
4255	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4256
4257	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
4258}
4259
4260static int
4261iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4262{
4263	struct iwn4965_node_info hnode;
4264	caddr_t src, dst;
4265
4266	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4267
4268	/*
4269	 * We use the node structure for 5000 Series internally (it is
4270	 * a superset of the one for 4965AGN). We thus copy the common
4271	 * fields before sending the command.
4272	 */
4273	src = (caddr_t)node;
4274	dst = (caddr_t)&hnode;
4275	memcpy(dst, src, 48);
4276	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4277	memcpy(dst + 48, src + 72, 20);
4278	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4279}
4280
4281static int
4282iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4283{
4284
4285	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4286
4287	/* Direct mapping. */
4288	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4289}
4290
4291static int
4292iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4293{
4294#define	RV(v)	((v) & IEEE80211_RATE_VAL)
4295	struct iwn_node *wn = (void *)ni;
4296	struct ieee80211_rateset *rs = &ni->ni_rates;
4297	struct iwn_cmd_link_quality linkq;
4298	uint8_t txant;
4299	int i, rate, txrate;
4300
4301	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4302
4303	/* Use the first valid TX antenna. */
4304	txant = IWN_LSB(sc->txchainmask);
4305
4306	memset(&linkq, 0, sizeof linkq);
4307	linkq.id = wn->id;
4308	linkq.antmsk_1stream = txant;
4309	linkq.antmsk_2stream = IWN_ANT_AB;
4310	linkq.ampdu_max = 64;
4311	linkq.ampdu_threshold = 3;
4312	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4313
4314	/* Start at highest available bit-rate. */
4315	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4316		txrate = ni->ni_htrates.rs_nrates - 1;
4317	else
4318		txrate = rs->rs_nrates - 1;
4319	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4320		uint32_t plcp;
4321
4322		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4323			rate = IEEE80211_RATE_MCS | txrate;
4324		else
4325			rate = RV(rs->rs_rates[txrate]);
4326
4327		/* Do rate -> PLCP config mapping */
4328		plcp = iwn_rate_to_plcp(sc, ni, rate);
4329		linkq.retry[i] = plcp;
4330
4331		/* Special case for dual-stream rates? */
4332		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
4333		    RV(le32toh(plcp)) > 7)
4334			linkq.mimo = i + 1;
4335
4336		/* Next retry at immediate lower bit-rate. */
4337		if (txrate > 0)
4338			txrate--;
4339	}
4340
4341	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4342
4343	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4344#undef	RV
4345}
4346
4347/*
4348 * Broadcast node is used to send group-addressed and management frames.
4349 */
4350static int
4351iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4352{
4353	struct iwn_ops *ops = &sc->ops;
4354	struct ifnet *ifp = sc->sc_ifp;
4355	struct ieee80211com *ic = ifp->if_l2com;
4356	struct iwn_node_info node;
4357	struct iwn_cmd_link_quality linkq;
4358	uint8_t txant;
4359	int i, error;
4360
4361	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4362
4363	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
4364
4365	memset(&node, 0, sizeof node);
4366	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
4367	node.id = sc->broadcast_id;
4368	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
4369	if ((error = ops->add_node(sc, &node, async)) != 0)
4370		return error;
4371
4372	/* Use the first valid TX antenna. */
4373	txant = IWN_LSB(sc->txchainmask);
4374
4375	memset(&linkq, 0, sizeof linkq);
4376	linkq.id = sc->broadcast_id;
4377	linkq.antmsk_1stream = txant;
4378	linkq.antmsk_2stream = IWN_ANT_AB;
4379	linkq.ampdu_max = 64;
4380	linkq.ampdu_threshold = 3;
4381	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4382
4383	/* Use lowest mandatory bit-rate. */
4384	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
4385		linkq.retry[0] = htole32(0xd);
4386	else
4387		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
4388	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
4389	/* Use same bit-rate for all TX retries. */
4390	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4391		linkq.retry[i] = linkq.retry[0];
4392	}
4393
4394	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4395
4396	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4397}
4398
4399static int
4400iwn_updateedca(struct ieee80211com *ic)
4401{
4402#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
4403	struct iwn_softc *sc = ic->ic_ifp->if_softc;
4404	struct iwn_edca_params cmd;
4405	int aci;
4406
4407	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4408
4409	memset(&cmd, 0, sizeof cmd);
4410	cmd.flags = htole32(IWN_EDCA_UPDATE);
4411	for (aci = 0; aci < WME_NUM_AC; aci++) {
4412		const struct wmeParams *ac =
4413		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
4414		cmd.ac[aci].aifsn = ac->wmep_aifsn;
4415		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
4416		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
4417		cmd.ac[aci].txoplimit =
4418		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
4419	}
4420	IEEE80211_UNLOCK(ic);
4421	IWN_LOCK(sc);
4422	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4423	IWN_UNLOCK(sc);
4424	IEEE80211_LOCK(ic);
4425
4426	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4427
4428	return 0;
4429#undef IWN_EXP2
4430}
4431
4432static void
4433iwn_update_mcast(struct ifnet *ifp)
4434{
4435	/* Ignore */
4436}
4437
4438static void
4439iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4440{
4441	struct iwn_cmd_led led;
4442
4443	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4444
4445	/* Clear microcode LED ownership. */
4446	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4447
4448	led.which = which;
4449	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4450	led.off = off;
4451	led.on = on;
4452	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4453}
4454
4455/*
4456 * Set the critical temperature at which the firmware will stop the radio
4457 * and notify us.
4458 */
4459static int
4460iwn_set_critical_temp(struct iwn_softc *sc)
4461{
4462	struct iwn_critical_temp crit;
4463	int32_t temp;
4464
4465	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4466
4467	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4468
4469	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4470		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4471	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4472		temp = IWN_CTOK(110);
4473	else
4474		temp = 110;
4475	memset(&crit, 0, sizeof crit);
4476	crit.tempR = htole32(temp);
4477	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
4478	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4479}
4480
4481static int
4482iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4483{
4484	struct iwn_cmd_timing cmd;
4485	uint64_t val, mod;
4486
4487	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4488
4489	memset(&cmd, 0, sizeof cmd);
4490	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
4491	cmd.bintval = htole16(ni->ni_intval);
4492	cmd.lintval = htole16(10);
4493
4494	/* Compute remaining time until next beacon. */
4495	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4496	mod = le64toh(cmd.tstamp) % val;
4497	cmd.binitval = htole32((uint32_t)(val - mod));
4498
4499	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
4500	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
4501
4502	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4503}
4504
4505static void
4506iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4507{
4508	struct ifnet *ifp = sc->sc_ifp;
4509	struct ieee80211com *ic = ifp->if_l2com;
4510
4511	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4512
4513	/* Adjust TX power if need be (delta >= 3 degC). */
4514	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
4515	    __func__, sc->temp, temp);
4516	if (abs(temp - sc->temp) >= 3) {
4517		/* Record temperature of last calibration. */
4518		sc->temp = temp;
4519		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
4520	}
4521}
4522
4523/*
4524 * Set TX power for current channel (each rate has its own power settings).
4525 * This function takes into account the regulatory information from EEPROM,
4526 * the current temperature and the current voltage.
4527 */
4528static int
4529iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4530    int async)
4531{
4532/* Fixed-point arithmetic division using a n-bit fractional part. */
4533#define fdivround(a, b, n)	\
4534	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4535/* Linear interpolation. */
4536#define interpolate(x, x1, y1, x2, y2, n)	\
4537	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
4538
4539	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4540	struct iwn_ucode_info *uc = &sc->ucode_info;
4541	struct iwn4965_cmd_txpower cmd;
4542	struct iwn4965_eeprom_chan_samples *chans;
4543	const uint8_t *rf_gain, *dsp_gain;
4544	int32_t vdiff, tdiff;
4545	int i, c, grp, maxpwr;
4546	uint8_t chan;
4547
4548	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
4549	/* Retrieve current channel from last RXON. */
4550	chan = sc->rxon->chan;
4551	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4552	    chan);
4553
4554	memset(&cmd, 0, sizeof cmd);
4555	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4556	cmd.chan = chan;
4557
4558	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4559		maxpwr   = sc->maxpwr5GHz;
4560		rf_gain  = iwn4965_rf_gain_5ghz;
4561		dsp_gain = iwn4965_dsp_gain_5ghz;
4562	} else {
4563		maxpwr   = sc->maxpwr2GHz;
4564		rf_gain  = iwn4965_rf_gain_2ghz;
4565		dsp_gain = iwn4965_dsp_gain_2ghz;
4566	}
4567
4568	/* Compute voltage compensation. */
4569	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4570	if (vdiff > 0)
4571		vdiff *= 2;
4572	if (abs(vdiff) > 2)
4573		vdiff = 0;
4574	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4575	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4576	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4577
4578	/* Get channel attenuation group. */
4579	if (chan <= 20)		/* 1-20 */
4580		grp = 4;
4581	else if (chan <= 43)	/* 34-43 */
4582		grp = 0;
4583	else if (chan <= 70)	/* 44-70 */
4584		grp = 1;
4585	else if (chan <= 124)	/* 71-124 */
4586		grp = 2;
4587	else			/* 125-200 */
4588		grp = 3;
4589	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4590	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4591
4592	/* Get channel sub-band. */
4593	for (i = 0; i < IWN_NBANDS; i++)
4594		if (sc->bands[i].lo != 0 &&
4595		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4596			break;
4597	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
4598		return EINVAL;
4599	chans = sc->bands[i].chans;
4600	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4601	    "%s: chan %d sub-band=%d\n", __func__, chan, i);
4602
4603	for (c = 0; c < 2; c++) {
4604		uint8_t power, gain, temp;
4605		int maxchpwr, pwr, ridx, idx;
4606
4607		power = interpolate(chan,
4608		    chans[0].num, chans[0].samples[c][1].power,
4609		    chans[1].num, chans[1].samples[c][1].power, 1);
4610		gain  = interpolate(chan,
4611		    chans[0].num, chans[0].samples[c][1].gain,
4612		    chans[1].num, chans[1].samples[c][1].gain, 1);
4613		temp  = interpolate(chan,
4614		    chans[0].num, chans[0].samples[c][1].temp,
4615		    chans[1].num, chans[1].samples[c][1].temp, 1);
4616		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4617		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4618		    __func__, c, power, gain, temp);
4619
4620		/* Compute temperature compensation. */
4621		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4622		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4623		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4624		    __func__, tdiff, sc->temp, temp);
4625
4626		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4627			/* Convert dBm to half-dBm. */
4628			maxchpwr = sc->maxpwr[chan] * 2;
4629			if ((ridx / 8) & 1)
4630				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4631
4632			pwr = maxpwr;
4633
4634			/* Adjust TX power based on rate. */
4635			if ((ridx % 8) == 5)
4636				pwr -= 15;	/* OFDM48: -7.5dB */
4637			else if ((ridx % 8) == 6)
4638				pwr -= 17;	/* OFDM54: -8.5dB */
4639			else if ((ridx % 8) == 7)
4640				pwr -= 20;	/* OFDM60: -10dB */
4641			else
4642				pwr -= 10;	/* Others: -5dB */
4643
4644			/* Do not exceed channel max TX power. */
4645			if (pwr > maxchpwr)
4646				pwr = maxchpwr;
4647
4648			idx = gain - (pwr - power) - tdiff - vdiff;
4649			if ((ridx / 8) & 1)	/* MIMO */
4650				idx += (int32_t)le32toh(uc->atten[grp][c]);
4651
4652			if (cmd.band == 0)
4653				idx += 9;	/* 5GHz */
4654			if (ridx == IWN_RIDX_MAX)
4655				idx += 5;	/* CCK */
4656
4657			/* Make sure idx stays in a valid range. */
4658			if (idx < 0)
4659				idx = 0;
4660			else if (idx > IWN4965_MAX_PWR_INDEX)
4661				idx = IWN4965_MAX_PWR_INDEX;
4662
4663			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4664			    "%s: Tx chain %d, rate idx %d: power=%d\n",
4665			    __func__, c, ridx, idx);
4666			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4667			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4668		}
4669	}
4670
4671	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4672	    "%s: set tx power for chan %d\n", __func__, chan);
4673	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4674
4675#undef interpolate
4676#undef fdivround
4677}
4678
4679static int
4680iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4681    int async)
4682{
4683	struct iwn5000_cmd_txpower cmd;
4684
4685	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4686
4687	/*
4688	 * TX power calibration is handled automatically by the firmware
4689	 * for 5000 Series.
4690	 */
4691	memset(&cmd, 0, sizeof cmd);
4692	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4693	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4694	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4695	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4696	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4697}
4698
4699/*
4700 * Retrieve the maximum RSSI (in dBm) among receivers.
4701 */
4702static int
4703iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4704{
4705	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4706	uint8_t mask, agc;
4707	int rssi;
4708
4709	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4710
4711	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4712	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
4713
4714	rssi = 0;
4715	if (mask & IWN_ANT_A)
4716		rssi = MAX(rssi, phy->rssi[0]);
4717	if (mask & IWN_ANT_B)
4718		rssi = MAX(rssi, phy->rssi[2]);
4719	if (mask & IWN_ANT_C)
4720		rssi = MAX(rssi, phy->rssi[4]);
4721
4722	DPRINTF(sc, IWN_DEBUG_RECV,
4723	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4724	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4725	    rssi - agc - IWN_RSSI_TO_DBM);
4726	return rssi - agc - IWN_RSSI_TO_DBM;
4727}
4728
4729static int
4730iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4731{
4732	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4733	uint8_t agc;
4734	int rssi;
4735
4736	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4737
4738	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4739
4740	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4741		   le16toh(phy->rssi[1]) & 0xff);
4742	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4743
4744	DPRINTF(sc, IWN_DEBUG_RECV,
4745	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4746	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
4747	    rssi - agc - IWN_RSSI_TO_DBM);
4748	return rssi - agc - IWN_RSSI_TO_DBM;
4749}
4750
4751/*
4752 * Retrieve the average noise (in dBm) among receivers.
4753 */
4754static int
4755iwn_get_noise(const struct iwn_rx_general_stats *stats)
4756{
4757	int i, total, nbant, noise;
4758
4759	total = nbant = 0;
4760	for (i = 0; i < 3; i++) {
4761		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4762			continue;
4763		total += noise;
4764		nbant++;
4765	}
4766	/* There should be at least one antenna but check anyway. */
4767	return (nbant == 0) ? -127 : (total / nbant) - 107;
4768}
4769
4770/*
4771 * Compute temperature (in degC) from last received statistics.
4772 */
4773static int
4774iwn4965_get_temperature(struct iwn_softc *sc)
4775{
4776	struct iwn_ucode_info *uc = &sc->ucode_info;
4777	int32_t r1, r2, r3, r4, temp;
4778
4779	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4780
4781	r1 = le32toh(uc->temp[0].chan20MHz);
4782	r2 = le32toh(uc->temp[1].chan20MHz);
4783	r3 = le32toh(uc->temp[2].chan20MHz);
4784	r4 = le32toh(sc->rawtemp);
4785
4786	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4787		return 0;
4788
4789	/* Sign-extend 23-bit R4 value to 32-bit. */
4790	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4791	/* Compute temperature in Kelvin. */
4792	temp = (259 * (r4 - r2)) / (r3 - r1);
4793	temp = (temp * 97) / 100 + 8;
4794
4795	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4796	    IWN_KTOC(temp));
4797	return IWN_KTOC(temp);
4798}
4799
4800static int
4801iwn5000_get_temperature(struct iwn_softc *sc)
4802{
4803	int32_t temp;
4804
4805	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4806
4807	/*
4808	 * Temperature is not used by the driver for 5000 Series because
4809	 * TX power calibration is handled by firmware.
4810	 */
4811	temp = le32toh(sc->rawtemp);
4812	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4813		temp = (temp / -5) + sc->temp_off;
4814		temp = IWN_KTOC(temp);
4815	}
4816	return temp;
4817}
4818
4819/*
4820 * Initialize sensitivity calibration state machine.
4821 */
4822static int
4823iwn_init_sensitivity(struct iwn_softc *sc)
4824{
4825	struct iwn_ops *ops = &sc->ops;
4826	struct iwn_calib_state *calib = &sc->calib;
4827	uint32_t flags;
4828	int error;
4829
4830	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4831
4832	/* Reset calibration state machine. */
4833	memset(calib, 0, sizeof (*calib));
4834	calib->state = IWN_CALIB_STATE_INIT;
4835	calib->cck_state = IWN_CCK_STATE_HIFA;
4836	/* Set initial correlation values. */
4837	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4838	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4839	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4840	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4841	calib->cck_x4      = 125;
4842	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4843	calib->energy_cck  = sc->limits->energy_cck;
4844
4845	/* Write initial sensitivity. */
4846	if ((error = iwn_send_sensitivity(sc)) != 0)
4847		return error;
4848
4849	/* Write initial gains. */
4850	if ((error = ops->init_gains(sc)) != 0)
4851		return error;
4852
4853	/* Request statistics at each beacon interval. */
4854	flags = 0;
4855	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4856	    __func__);
4857	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4858}
4859
4860/*
4861 * Collect noise and RSSI statistics for the first 20 beacons received
4862 * after association and use them to determine connected antennas and
4863 * to set differential gains.
4864 */
4865static void
4866iwn_collect_noise(struct iwn_softc *sc,
4867    const struct iwn_rx_general_stats *stats)
4868{
4869	struct iwn_ops *ops = &sc->ops;
4870	struct iwn_calib_state *calib = &sc->calib;
4871	struct ifnet *ifp = sc->sc_ifp;
4872	struct ieee80211com *ic = ifp->if_l2com;
4873	uint32_t val;
4874	int i;
4875
4876	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4877
4878	/* Accumulate RSSI and noise for all 3 antennas. */
4879	for (i = 0; i < 3; i++) {
4880		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4881		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4882	}
4883	/* NB: We update differential gains only once after 20 beacons. */
4884	if (++calib->nbeacons < 20)
4885		return;
4886
4887	/* Determine highest average RSSI. */
4888	val = MAX(calib->rssi[0], calib->rssi[1]);
4889	val = MAX(calib->rssi[2], val);
4890
4891	/* Determine which antennas are connected. */
4892	sc->chainmask = sc->rxchainmask;
4893	for (i = 0; i < 3; i++)
4894		if (val - calib->rssi[i] > 15 * 20)
4895			sc->chainmask &= ~(1 << i);
4896	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4897	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4898	    __func__, sc->rxchainmask, sc->chainmask);
4899
4900	/* If none of the TX antennas are connected, keep at least one. */
4901	if ((sc->chainmask & sc->txchainmask) == 0)
4902		sc->chainmask |= IWN_LSB(sc->txchainmask);
4903
4904	(void)ops->set_gains(sc);
4905	calib->state = IWN_CALIB_STATE_RUN;
4906
4907#ifdef notyet
4908	/* XXX Disable RX chains with no antennas connected. */
4909	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4910	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
4911#endif
4912
4913	/* Enable power-saving mode if requested by user. */
4914	if (ic->ic_flags & IEEE80211_F_PMGTON)
4915		(void)iwn_set_pslevel(sc, 0, 3, 1);
4916
4917	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4918
4919}
4920
4921static int
4922iwn4965_init_gains(struct iwn_softc *sc)
4923{
4924	struct iwn_phy_calib_gain cmd;
4925
4926	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4927
4928	memset(&cmd, 0, sizeof cmd);
4929	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4930	/* Differential gains initially set to 0 for all 3 antennas. */
4931	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4932	    "%s: setting initial differential gains\n", __func__);
4933	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4934}
4935
4936static int
4937iwn5000_init_gains(struct iwn_softc *sc)
4938{
4939	struct iwn_phy_calib cmd;
4940
4941	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4942
4943	memset(&cmd, 0, sizeof cmd);
4944	cmd.code = sc->reset_noise_gain;
4945	cmd.ngroups = 1;
4946	cmd.isvalid = 1;
4947	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4948	    "%s: setting initial differential gains\n", __func__);
4949	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4950}
4951
4952static int
4953iwn4965_set_gains(struct iwn_softc *sc)
4954{
4955	struct iwn_calib_state *calib = &sc->calib;
4956	struct iwn_phy_calib_gain cmd;
4957	int i, delta, noise;
4958
4959	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4960
4961	/* Get minimal noise among connected antennas. */
4962	noise = INT_MAX;	/* NB: There's at least one antenna. */
4963	for (i = 0; i < 3; i++)
4964		if (sc->chainmask & (1 << i))
4965			noise = MIN(calib->noise[i], noise);
4966
4967	memset(&cmd, 0, sizeof cmd);
4968	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4969	/* Set differential gains for connected antennas. */
4970	for (i = 0; i < 3; i++) {
4971		if (sc->chainmask & (1 << i)) {
4972			/* Compute attenuation (in unit of 1.5dB). */
4973			delta = (noise - (int32_t)calib->noise[i]) / 30;
4974			/* NB: delta <= 0 */
4975			/* Limit to [-4.5dB,0]. */
4976			cmd.gain[i] = MIN(abs(delta), 3);
4977			if (delta < 0)
4978				cmd.gain[i] |= 1 << 2;	/* sign bit */
4979		}
4980	}
4981	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4982	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4983	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4984	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4985}
4986
4987static int
4988iwn5000_set_gains(struct iwn_softc *sc)
4989{
4990	struct iwn_calib_state *calib = &sc->calib;
4991	struct iwn_phy_calib_gain cmd;
4992	int i, ant, div, delta;
4993
4994	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4995
4996	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
4997	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4998
4999	memset(&cmd, 0, sizeof cmd);
5000	cmd.code = sc->noise_gain;
5001	cmd.ngroups = 1;
5002	cmd.isvalid = 1;
5003	/* Get first available RX antenna as referential. */
5004	ant = IWN_LSB(sc->rxchainmask);
5005	/* Set differential gains for other antennas. */
5006	for (i = ant + 1; i < 3; i++) {
5007		if (sc->chainmask & (1 << i)) {
5008			/* The delta is relative to antenna "ant". */
5009			delta = ((int32_t)calib->noise[ant] -
5010			    (int32_t)calib->noise[i]) / div;
5011			/* Limit to [-4.5dB,+4.5dB]. */
5012			cmd.gain[i - 1] = MIN(abs(delta), 3);
5013			if (delta < 0)
5014				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
5015		}
5016	}
5017	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5018	    "setting differential gains Ant B/C: %x/%x (%x)\n",
5019	    cmd.gain[0], cmd.gain[1], sc->chainmask);
5020	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5021}
5022
5023/*
5024 * Tune RF RX sensitivity based on the number of false alarms detected
5025 * during the last beacon period.
5026 */
5027static void
5028iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5029{
5030#define inc(val, inc, max)			\
5031	if ((val) < (max)) {			\
5032		if ((val) < (max) - (inc))	\
5033			(val) += (inc);		\
5034		else				\
5035			(val) = (max);		\
5036		needs_update = 1;		\
5037	}
5038#define dec(val, dec, min)			\
5039	if ((val) > (min)) {			\
5040		if ((val) > (min) + (dec))	\
5041			(val) -= (dec);		\
5042		else				\
5043			(val) = (min);		\
5044		needs_update = 1;		\
5045	}
5046
5047	const struct iwn_sensitivity_limits *limits = sc->limits;
5048	struct iwn_calib_state *calib = &sc->calib;
5049	uint32_t val, rxena, fa;
5050	uint32_t energy[3], energy_min;
5051	uint8_t noise[3], noise_ref;
5052	int i, needs_update = 0;
5053
5054	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5055
5056	/* Check that we've been enabled long enough. */
5057	if ((rxena = le32toh(stats->general.load)) == 0){
5058		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
5059		return;
5060	}
5061
5062	/* Compute number of false alarms since last call for OFDM. */
5063	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
5064	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
5065	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5066
5067	/* Save counters values for next call. */
5068	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
5069	calib->fa_ofdm = le32toh(stats->ofdm.fa);
5070
5071	if (fa > 50 * rxena) {
5072		/* High false alarm count, decrease sensitivity. */
5073		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5074		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
5075		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
5076		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
5077		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
5078		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
5079
5080	} else if (fa < 5 * rxena) {
5081		/* Low false alarm count, increase sensitivity. */
5082		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5083		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
5084		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
5085		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
5086		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
5087		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
5088	}
5089
5090	/* Compute maximum noise among 3 receivers. */
5091	for (i = 0; i < 3; i++)
5092		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
5093	val = MAX(noise[0], noise[1]);
5094	val = MAX(noise[2], val);
5095	/* Insert it into our samples table. */
5096	calib->noise_samples[calib->cur_noise_sample] = val;
5097	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
5098
5099	/* Compute maximum noise among last 20 samples. */
5100	noise_ref = calib->noise_samples[0];
5101	for (i = 1; i < 20; i++)
5102		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
5103
5104	/* Compute maximum energy among 3 receivers. */
5105	for (i = 0; i < 3; i++)
5106		energy[i] = le32toh(stats->general.energy[i]);
5107	val = MIN(energy[0], energy[1]);
5108	val = MIN(energy[2], val);
5109	/* Insert it into our samples table. */
5110	calib->energy_samples[calib->cur_energy_sample] = val;
5111	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
5112
5113	/* Compute minimum energy among last 10 samples. */
5114	energy_min = calib->energy_samples[0];
5115	for (i = 1; i < 10; i++)
5116		energy_min = MAX(energy_min, calib->energy_samples[i]);
5117	energy_min += 6;
5118
5119	/* Compute number of false alarms since last call for CCK. */
5120	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
5121	fa += le32toh(stats->cck.fa) - calib->fa_cck;
5122	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */
5123
5124	/* Save counters values for next call. */
5125	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
5126	calib->fa_cck = le32toh(stats->cck.fa);
5127
5128	if (fa > 50 * rxena) {
5129		/* High false alarm count, decrease sensitivity. */
5130		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5131		    "%s: CCK high false alarm count: %u\n", __func__, fa);
5132		calib->cck_state = IWN_CCK_STATE_HIFA;
5133		calib->low_fa = 0;
5134
5135		if (calib->cck_x4 > 160) {
5136			calib->noise_ref = noise_ref;
5137			if (calib->energy_cck > 2)
5138				dec(calib->energy_cck, 2, energy_min);
5139		}
5140		if (calib->cck_x4 < 160) {
5141			calib->cck_x4 = 161;
5142			needs_update = 1;
5143		} else
5144			inc(calib->cck_x4, 3, limits->max_cck_x4);
5145
5146		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
5147
5148	} else if (fa < 5 * rxena) {
5149		/* Low false alarm count, increase sensitivity. */
5150		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5151		    "%s: CCK low false alarm count: %u\n", __func__, fa);
5152		calib->cck_state = IWN_CCK_STATE_LOFA;
5153		calib->low_fa++;
5154
5155		if (calib->cck_state != IWN_CCK_STATE_INIT &&
5156		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
5157		     calib->low_fa > 100)) {
5158			inc(calib->energy_cck, 2, limits->min_energy_cck);
5159			dec(calib->cck_x4,     3, limits->min_cck_x4);
5160			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
5161		}
5162	} else {
5163		/* Not worth to increase or decrease sensitivity. */
5164		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5165		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
5166		calib->low_fa = 0;
5167		calib->noise_ref = noise_ref;
5168
5169		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
5170			/* Previous interval had many false alarms. */
5171			dec(calib->energy_cck, 8, energy_min);
5172		}
5173		calib->cck_state = IWN_CCK_STATE_INIT;
5174	}
5175
5176	if (needs_update)
5177		(void)iwn_send_sensitivity(sc);
5178
5179	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5180
5181#undef dec
5182#undef inc
5183}
5184
5185static int
5186iwn_send_sensitivity(struct iwn_softc *sc)
5187{
5188	struct iwn_calib_state *calib = &sc->calib;
5189	struct iwn_enhanced_sensitivity_cmd cmd;
5190	int len;
5191
5192	memset(&cmd, 0, sizeof cmd);
5193	len = sizeof (struct iwn_sensitivity_cmd);
5194	cmd.which = IWN_SENSITIVITY_WORKTBL;
5195	/* OFDM modulation. */
5196	cmd.corr_ofdm_x1       = htole16(calib->ofdm_x1);
5197	cmd.corr_ofdm_mrc_x1   = htole16(calib->ofdm_mrc_x1);
5198	cmd.corr_ofdm_x4       = htole16(calib->ofdm_x4);
5199	cmd.corr_ofdm_mrc_x4   = htole16(calib->ofdm_mrc_x4);
5200	cmd.energy_ofdm        = htole16(sc->limits->energy_ofdm);
5201	cmd.energy_ofdm_th     = htole16(62);
5202	/* CCK modulation. */
5203	cmd.corr_cck_x4        = htole16(calib->cck_x4);
5204	cmd.corr_cck_mrc_x4    = htole16(calib->cck_mrc_x4);
5205	cmd.energy_cck         = htole16(calib->energy_cck);
5206	/* Barker modulation: use default values. */
5207	cmd.corr_barker        = htole16(190);
5208	cmd.corr_barker_mrc    = htole16(390);
5209
5210	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5211	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
5212	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
5213	    calib->ofdm_mrc_x4, calib->cck_x4,
5214	    calib->cck_mrc_x4, calib->energy_cck);
5215
5216	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
5217		goto send;
5218	/* Enhanced sensitivity settings. */
5219	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
5220	cmd.ofdm_det_slope_mrc = htole16(668);
5221	cmd.ofdm_det_icept_mrc = htole16(4);
5222	cmd.ofdm_det_slope     = htole16(486);
5223	cmd.ofdm_det_icept     = htole16(37);
5224	cmd.cck_det_slope_mrc  = htole16(853);
5225	cmd.cck_det_icept_mrc  = htole16(4);
5226	cmd.cck_det_slope      = htole16(476);
5227	cmd.cck_det_icept      = htole16(99);
5228send:
5229	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
5230}
5231
5232/*
5233 * Set STA mode power saving level (between 0 and 5).
5234 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
5235 */
5236static int
5237iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
5238{
5239	struct iwn_pmgt_cmd cmd;
5240	const struct iwn_pmgt *pmgt;
5241	uint32_t max, skip_dtim;
5242	uint32_t reg;
5243	int i;
5244
5245	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
5246	    "%s: dtim=%d, level=%d, async=%d\n",
5247	    __func__,
5248	    dtim,
5249	    level,
5250	    async);
5251
5252	/* Select which PS parameters to use. */
5253	if (dtim <= 2)
5254		pmgt = &iwn_pmgt[0][level];
5255	else if (dtim <= 10)
5256		pmgt = &iwn_pmgt[1][level];
5257	else
5258		pmgt = &iwn_pmgt[2][level];
5259
5260	memset(&cmd, 0, sizeof cmd);
5261	if (level != 0)	/* not CAM */
5262		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
5263	if (level == 5)
5264		cmd.flags |= htole16(IWN_PS_FAST_PD);
5265	/* Retrieve PCIe Active State Power Management (ASPM). */
5266	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
5267	if (!(reg & 0x1))	/* L0s Entry disabled. */
5268		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
5269	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
5270	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
5271
5272	if (dtim == 0) {
5273		dtim = 1;
5274		skip_dtim = 0;
5275	} else
5276		skip_dtim = pmgt->skip_dtim;
5277	if (skip_dtim != 0) {
5278		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
5279		max = pmgt->intval[4];
5280		if (max == (uint32_t)-1)
5281			max = dtim * (skip_dtim + 1);
5282		else if (max > dtim)
5283			max = (max / dtim) * dtim;
5284	} else
5285		max = dtim;
5286	for (i = 0; i < 5; i++)
5287		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
5288
5289	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
5290	    level);
5291	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
5292}
5293
5294static int
5295iwn_send_btcoex(struct iwn_softc *sc)
5296{
5297	struct iwn_bluetooth cmd;
5298
5299	memset(&cmd, 0, sizeof cmd);
5300	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
5301	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
5302	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
5303	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
5304	    __func__);
5305	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
5306}
5307
5308static int
5309iwn_send_advanced_btcoex(struct iwn_softc *sc)
5310{
5311	static const uint32_t btcoex_3wire[12] = {
5312		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
5313		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
5314		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
5315	};
5316	struct iwn6000_btcoex_config btconfig;
5317	struct iwn_btcoex_priotable btprio;
5318	struct iwn_btcoex_prot btprot;
5319	int error, i;
5320
5321	memset(&btconfig, 0, sizeof btconfig);
5322	btconfig.flags = 145;
5323	btconfig.max_kill = 5;
5324	btconfig.bt3_t7_timer = 1;
5325	btconfig.kill_ack = htole32(0xffff0000);
5326	btconfig.kill_cts = htole32(0xffff0000);
5327	btconfig.sample_time = 2;
5328	btconfig.bt3_t2_timer = 0xc;
5329	for (i = 0; i < 12; i++)
5330		btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
5331	btconfig.valid = htole16(0xff);
5332	btconfig.prio_boost = 0xf0;
5333	DPRINTF(sc, IWN_DEBUG_RESET,
5334	    "%s: configuring advanced bluetooth coexistence\n", __func__);
5335	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
5336	if (error != 0)
5337		return error;
5338
5339	memset(&btprio, 0, sizeof btprio);
5340	btprio.calib_init1 = 0x6;
5341	btprio.calib_init2 = 0x7;
5342	btprio.calib_periodic_low1 = 0x2;
5343	btprio.calib_periodic_low2 = 0x3;
5344	btprio.calib_periodic_high1 = 0x4;
5345	btprio.calib_periodic_high2 = 0x5;
5346	btprio.dtim = 0x6;
5347	btprio.scan52 = 0x8;
5348	btprio.scan24 = 0xa;
5349	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
5350	    1);
5351	if (error != 0)
5352		return error;
5353
5354	/* Force BT state machine change. */
5355	memset(&btprot, 0, sizeof btprot);
5356	btprot.open = 1;
5357	btprot.type = 1;
5358	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5359	if (error != 0)
5360		return error;
5361	btprot.open = 0;
5362	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5363}
5364
5365static int
5366iwn5000_runtime_calib(struct iwn_softc *sc)
5367{
5368	struct iwn5000_calib_config cmd;
5369
5370	memset(&cmd, 0, sizeof cmd);
5371	cmd.ucode.once.enable = 0xffffffff;
5372	cmd.ucode.once.start = IWN5000_CALIB_DC;
5373	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5374	    "%s: configuring runtime calibration\n", __func__);
5375	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5376}
5377
5378static int
5379iwn_config(struct iwn_softc *sc)
5380{
5381	struct iwn_ops *ops = &sc->ops;
5382	struct ifnet *ifp = sc->sc_ifp;
5383	struct ieee80211com *ic = ifp->if_l2com;
5384	uint32_t txmask;
5385	uint16_t rxchain;
5386	int error;
5387
5388	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5389
5390	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5391		/* Set radio temperature sensor offset. */
5392		error = iwn5000_temp_offset_calib(sc);
5393		if (error != 0) {
5394			device_printf(sc->sc_dev,
5395			    "%s: could not set temperature offset\n", __func__);
5396			return error;
5397		}
5398	}
5399
5400	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5401		/* Configure runtime DC calibration. */
5402		error = iwn5000_runtime_calib(sc);
5403		if (error != 0) {
5404			device_printf(sc->sc_dev,
5405			    "%s: could not configure runtime calibration\n",
5406			    __func__);
5407			return error;
5408		}
5409	}
5410
5411	/* Configure valid TX chains for >=5000 Series. */
5412	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5413		txmask = htole32(sc->txchainmask);
5414		DPRINTF(sc, IWN_DEBUG_RESET,
5415		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
5416		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5417		    sizeof txmask, 0);
5418		if (error != 0) {
5419			device_printf(sc->sc_dev,
5420			    "%s: could not configure valid TX chains, "
5421			    "error %d\n", __func__, error);
5422			return error;
5423		}
5424	}
5425
5426	/* Configure bluetooth coexistence. */
5427	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
5428		error = iwn_send_advanced_btcoex(sc);
5429	else
5430		error = iwn_send_btcoex(sc);
5431	if (error != 0) {
5432		device_printf(sc->sc_dev,
5433		    "%s: could not configure bluetooth coexistence, error %d\n",
5434		    __func__, error);
5435		return error;
5436	}
5437
5438	/* Set mode, channel, RX filter and enable RX. */
5439	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5440	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
5441	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
5442	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
5443	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
5444	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5445	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
5446		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5447	switch (ic->ic_opmode) {
5448	case IEEE80211_M_STA:
5449		sc->rxon->mode = IWN_MODE_STA;
5450		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
5451		break;
5452	case IEEE80211_M_MONITOR:
5453		sc->rxon->mode = IWN_MODE_MONITOR;
5454		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
5455		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5456		break;
5457	default:
5458		/* Should not get there. */
5459		break;
5460	}
5461	sc->rxon->cck_mask  = 0x0f;	/* not yet negotiated */
5462	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
5463	sc->rxon->ht_single_mask = 0xff;
5464	sc->rxon->ht_dual_mask = 0xff;
5465	sc->rxon->ht_triple_mask = 0xff;
5466	rxchain =
5467	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5468	    IWN_RXCHAIN_MIMO_COUNT(2) |
5469	    IWN_RXCHAIN_IDLE_COUNT(2);
5470	sc->rxon->rxchain = htole16(rxchain);
5471	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
5472	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
5473	if (error != 0) {
5474		device_printf(sc->sc_dev, "%s: RXON command failed\n",
5475		    __func__);
5476		return error;
5477	}
5478
5479	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5480		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
5481		    __func__);
5482		return error;
5483	}
5484
5485	/* Configuration has changed, set TX power accordingly. */
5486	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
5487		device_printf(sc->sc_dev, "%s: could not set TX power\n",
5488		    __func__);
5489		return error;
5490	}
5491
5492	if ((error = iwn_set_critical_temp(sc)) != 0) {
5493		device_printf(sc->sc_dev,
5494		    "%s: could not set critical temperature\n", __func__);
5495		return error;
5496	}
5497
5498	/* Set power saving level to CAM during initialization. */
5499	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5500		device_printf(sc->sc_dev,
5501		    "%s: could not set power saving level\n", __func__);
5502		return error;
5503	}
5504
5505	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5506
5507	return 0;
5508}
5509
5510/*
5511 * Add an ssid element to a frame.
5512 */
5513static uint8_t *
5514ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
5515{
5516	*frm++ = IEEE80211_ELEMID_SSID;
5517	*frm++ = len;
5518	memcpy(frm, ssid, len);
5519	return frm + len;
5520}
5521
5522static int
5523iwn_scan(struct iwn_softc *sc)
5524{
5525	struct ifnet *ifp = sc->sc_ifp;
5526	struct ieee80211com *ic = ifp->if_l2com;
5527	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
5528	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
5529	struct iwn_scan_hdr *hdr;
5530	struct iwn_cmd_data *tx;
5531	struct iwn_scan_essid *essid;
5532	struct iwn_scan_chan *chan;
5533	struct ieee80211_frame *wh;
5534	struct ieee80211_rateset *rs;
5535	struct ieee80211_channel *c;
5536	uint8_t *buf, *frm;
5537	uint16_t rxchain;
5538	uint8_t txant;
5539	int buflen, error;
5540
5541	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5542
5543	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5544	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5545	if (buf == NULL) {
5546		device_printf(sc->sc_dev,
5547		    "%s: could not allocate buffer for scan command\n",
5548		    __func__);
5549		return ENOMEM;
5550	}
5551	hdr = (struct iwn_scan_hdr *)buf;
5552	/*
5553	 * Move to the next channel if no frames are received within 10ms
5554	 * after sending the probe request.
5555	 */
5556	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
5557	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5558
5559	/* Select antennas for scanning. */
5560	rxchain =
5561	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5562	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5563	    IWN_RXCHAIN_DRIVER_FORCE;
5564	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
5565	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5566		/* Ant A must be avoided in 5GHz because of an HW bug. */
5567		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5568	} else	/* Use all available RX antennas. */
5569		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5570	hdr->rxchain = htole16(rxchain);
5571	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5572
5573	tx = (struct iwn_cmd_data *)(hdr + 1);
5574	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5575	tx->id = sc->broadcast_id;
5576	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5577
5578	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
5579		/* Send probe requests at 6Mbps. */
5580		tx->rate = htole32(0xd);
5581		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5582	} else {
5583		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5584		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5585		    sc->rxon->associd && sc->rxon->chan > 14)
5586			tx->rate = htole32(0xd);
5587		else {
5588			/* Send probe requests at 1Mbps. */
5589			tx->rate = htole32(10 | IWN_RFLAG_CCK);
5590		}
5591		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5592	}
5593	/* Use the first valid TX antenna. */
5594	txant = IWN_LSB(sc->txchainmask);
5595	tx->rate |= htole32(IWN_RFLAG_ANT(txant));
5596
5597	essid = (struct iwn_scan_essid *)(tx + 1);
5598	if (ss->ss_ssid[0].len != 0) {
5599		essid[0].id = IEEE80211_ELEMID_SSID;
5600		essid[0].len = ss->ss_ssid[0].len;
5601		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
5602	}
5603	/*
5604	 * Build a probe request frame.  Most of the following code is a
5605	 * copy & paste of what is done in net80211.
5606	 */
5607	wh = (struct ieee80211_frame *)(essid + 20);
5608	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5609	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5610	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5611	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
5612	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
5613	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
5614	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
5615	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
5616
5617	frm = (uint8_t *)(wh + 1);
5618	frm = ieee80211_add_ssid(frm, NULL, 0);
5619	frm = ieee80211_add_rates(frm, rs);
5620	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5621		frm = ieee80211_add_xrates(frm, rs);
5622	if (ic->ic_htcaps & IEEE80211_HTC_HT)
5623		frm = ieee80211_add_htcap(frm, ni);
5624
5625	/* Set length of probe request. */
5626	tx->len = htole16(frm - (uint8_t *)wh);
5627
5628	c = ic->ic_curchan;
5629	chan = (struct iwn_scan_chan *)frm;
5630	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5631	chan->flags = 0;
5632	if (ss->ss_nssid > 0)
5633		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5634	chan->dsp_gain = 0x6e;
5635	if (IEEE80211_IS_CHAN_5GHZ(c) &&
5636	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5637		chan->rf_gain = 0x3b;
5638		chan->active  = htole16(24);
5639		chan->passive = htole16(110);
5640		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5641	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5642		chan->rf_gain = 0x3b;
5643		chan->active  = htole16(24);
5644		if (sc->rxon->associd)
5645			chan->passive = htole16(78);
5646		else
5647			chan->passive = htole16(110);
5648		hdr->crc_threshold = 0xffff;
5649	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5650		chan->rf_gain = 0x28;
5651		chan->active  = htole16(36);
5652		chan->passive = htole16(120);
5653		chan->flags |= htole32(IWN_CHAN_ACTIVE);
5654	} else {
5655		chan->rf_gain = 0x28;
5656		chan->active  = htole16(36);
5657		if (sc->rxon->associd)
5658			chan->passive = htole16(88);
5659		else
5660			chan->passive = htole16(120);
5661		hdr->crc_threshold = 0xffff;
5662	}
5663
5664	DPRINTF(sc, IWN_DEBUG_STATE,
5665	    "%s: chan %u flags 0x%x rf_gain 0x%x "
5666	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5667	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5668	    chan->active, chan->passive);
5669
5670	hdr->nchan++;
5671	chan++;
5672	buflen = (uint8_t *)chan - buf;
5673	hdr->len = htole16(buflen);
5674
5675	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5676	    hdr->nchan);
5677	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5678	free(buf, M_DEVBUF);
5679
5680	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5681
5682	return error;
5683}
5684
5685static int
5686iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5687{
5688	struct iwn_ops *ops = &sc->ops;
5689	struct ifnet *ifp = sc->sc_ifp;
5690	struct ieee80211com *ic = ifp->if_l2com;
5691	struct ieee80211_node *ni = vap->iv_bss;
5692	int error;
5693
5694	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5695
5696	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5697	/* Update adapter configuration. */
5698	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
5699	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5700	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5701	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5702		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5703	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5704		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
5705	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5706		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
5707	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5708		sc->rxon->cck_mask  = 0;
5709		sc->rxon->ofdm_mask = 0x15;
5710	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5711		sc->rxon->cck_mask  = 0x03;
5712		sc->rxon->ofdm_mask = 0;
5713	} else {
5714		/* Assume 802.11b/g. */
5715		sc->rxon->cck_mask  = 0x0f;
5716		sc->rxon->ofdm_mask = 0x15;
5717	}
5718	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5719	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
5720	    sc->rxon->ofdm_mask);
5721	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5722	if (error != 0) {
5723		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5724		    __func__, error);
5725		return error;
5726	}
5727
5728	/* Configuration has changed, set TX power accordingly. */
5729	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5730		device_printf(sc->sc_dev,
5731		    "%s: could not set TX power, error %d\n", __func__, error);
5732		return error;
5733	}
5734	/*
5735	 * Reconfiguring RXON clears the firmware nodes table so we must
5736	 * add the broadcast node again.
5737	 */
5738	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5739		device_printf(sc->sc_dev,
5740		    "%s: could not add broadcast node, error %d\n", __func__,
5741		    error);
5742		return error;
5743	}
5744
5745	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5746
5747	return 0;
5748}
5749
5750static int
5751iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5752{
5753	struct iwn_ops *ops = &sc->ops;
5754	struct ifnet *ifp = sc->sc_ifp;
5755	struct ieee80211com *ic = ifp->if_l2com;
5756	struct ieee80211_node *ni = vap->iv_bss;
5757	struct iwn_node_info node;
5758	uint32_t htflags = 0;
5759	int error;
5760
5761	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5762
5763	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5764	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5765		/* Link LED blinks while monitoring. */
5766		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5767		return 0;
5768	}
5769	if ((error = iwn_set_timing(sc, ni)) != 0) {
5770		device_printf(sc->sc_dev,
5771		    "%s: could not set timing, error %d\n", __func__, error);
5772		return error;
5773	}
5774
5775	/* Update adapter configuration. */
5776	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
5777	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
5778	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5779	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5780	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5781		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5782	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5783		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
5784	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5785		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
5786	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5787		sc->rxon->cck_mask  = 0;
5788		sc->rxon->ofdm_mask = 0x15;
5789	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5790		sc->rxon->cck_mask  = 0x03;
5791		sc->rxon->ofdm_mask = 0;
5792	} else {
5793		/* Assume 802.11b/g. */
5794		sc->rxon->cck_mask  = 0x0f;
5795		sc->rxon->ofdm_mask = 0x15;
5796	}
5797	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5798		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
5799		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
5800			switch (ic->ic_curhtprotmode) {
5801			case IEEE80211_HTINFO_OPMODE_HT20PR:
5802				htflags |= IWN_RXON_HT_MODEPURE40;
5803				break;
5804			default:
5805				htflags |= IWN_RXON_HT_MODEMIXED;
5806				break;
5807			}
5808		}
5809		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5810			htflags |= IWN_RXON_HT_HT40MINUS;
5811	}
5812	sc->rxon->flags |= htole32(htflags);
5813	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
5814	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5815	    sc->rxon->chan, sc->rxon->flags);
5816	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5817	if (error != 0) {
5818		device_printf(sc->sc_dev,
5819		    "%s: could not update configuration, error %d\n", __func__,
5820		    error);
5821		return error;
5822	}
5823
5824	/* Configuration has changed, set TX power accordingly. */
5825	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5826		device_printf(sc->sc_dev,
5827		    "%s: could not set TX power, error %d\n", __func__, error);
5828		return error;
5829	}
5830
5831	/* Fake a join to initialize the TX rate. */
5832	((struct iwn_node *)ni)->id = IWN_ID_BSS;
5833	iwn_newassoc(ni, 1);
5834
5835	/* Add BSS node. */
5836	memset(&node, 0, sizeof node);
5837	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5838	node.id = IWN_ID_BSS;
5839	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5840		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
5841		case IEEE80211_HTCAP_SMPS_ENA:
5842			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
5843			break;
5844		case IEEE80211_HTCAP_SMPS_DYNAMIC:
5845			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
5846			break;
5847		}
5848		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5849		    IWN_AMDPU_DENSITY(5));	/* 4us */
5850		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
5851			node.htflags |= htole32(IWN_NODE_HT40);
5852	}
5853	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5854	error = ops->add_node(sc, &node, 1);
5855	if (error != 0) {
5856		device_printf(sc->sc_dev,
5857		    "%s: could not add BSS node, error %d\n", __func__, error);
5858		return error;
5859	}
5860	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5861	    __func__, node.id);
5862	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5863		device_printf(sc->sc_dev,
5864		    "%s: could not setup link quality for node %d, error %d\n",
5865		    __func__, node.id, error);
5866		return error;
5867	}
5868
5869	if ((error = iwn_init_sensitivity(sc)) != 0) {
5870		device_printf(sc->sc_dev,
5871		    "%s: could not set sensitivity, error %d\n", __func__,
5872		    error);
5873		return error;
5874	}
5875	/* Start periodic calibration timer. */
5876	sc->calib.state = IWN_CALIB_STATE_ASSOC;
5877	sc->calib_cnt = 0;
5878	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5879	    sc);
5880
5881	/* Link LED always on while associated. */
5882	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5883
5884	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5885
5886	return 0;
5887}
5888
5889/*
5890 * This function is called by upper layer when an ADDBA request is received
5891 * from another STA and before the ADDBA response is sent.
5892 */
5893static int
5894iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
5895    int baparamset, int batimeout, int baseqctl)
5896{
5897#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
5898	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5899	struct iwn_ops *ops = &sc->ops;
5900	struct iwn_node *wn = (void *)ni;
5901	struct iwn_node_info node;
5902	uint16_t ssn;
5903	uint8_t tid;
5904	int error;
5905
5906	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5907
5908	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
5909	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
5910
5911	memset(&node, 0, sizeof node);
5912	node.id = wn->id;
5913	node.control = IWN_NODE_UPDATE;
5914	node.flags = IWN_FLAG_SET_ADDBA;
5915	node.addba_tid = tid;
5916	node.addba_ssn = htole16(ssn);
5917	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5918	    wn->id, tid, ssn);
5919	error = ops->add_node(sc, &node, 1);
5920	if (error != 0)
5921		return error;
5922	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
5923#undef MS
5924}
5925
5926/*
5927 * This function is called by upper layer on teardown of an HT-immediate
5928 * Block Ack agreement (eg. uppon receipt of a DELBA frame).
5929 */
5930static void
5931iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
5932{
5933	struct ieee80211com *ic = ni->ni_ic;
5934	struct iwn_softc *sc = ic->ic_ifp->if_softc;
5935	struct iwn_ops *ops = &sc->ops;
5936	struct iwn_node *wn = (void *)ni;
5937	struct iwn_node_info node;
5938	uint8_t tid;
5939
5940	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5941
5942	/* XXX: tid as an argument */
5943	for (tid = 0; tid < WME_NUM_TID; tid++) {
5944		if (&ni->ni_rx_ampdu[tid] == rap)
5945			break;
5946	}
5947
5948	memset(&node, 0, sizeof node);
5949	node.id = wn->id;
5950	node.control = IWN_NODE_UPDATE;
5951	node.flags = IWN_FLAG_SET_DELBA;
5952	node.delba_tid = tid;
5953	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5954	(void)ops->add_node(sc, &node, 1);
5955	sc->sc_ampdu_rx_stop(ni, rap);
5956}
5957
5958static int
5959iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5960    int dialogtoken, int baparamset, int batimeout)
5961{
5962	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5963	int qid;
5964
5965	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5966
5967	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
5968		if (sc->qid2tap[qid] == NULL)
5969			break;
5970	}
5971	if (qid == sc->ntxqs) {
5972		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
5973		    __func__);
5974		return 0;
5975	}
5976	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
5977	if (tap->txa_private == NULL) {
5978		device_printf(sc->sc_dev,
5979		    "%s: failed to alloc TX aggregation structure\n", __func__);
5980		return 0;
5981	}
5982	sc->qid2tap[qid] = tap;
5983	*(int *)tap->txa_private = qid;
5984	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5985	    batimeout);
5986}
5987
5988static int
5989iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5990    int code, int baparamset, int batimeout)
5991{
5992	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5993	int qid = *(int *)tap->txa_private;
5994	uint8_t tid = tap->txa_tid;
5995	int ret;
5996
5997	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5998
5999	if (code == IEEE80211_STATUS_SUCCESS) {
6000		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
6001		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
6002		if (ret != 1)
6003			return ret;
6004	} else {
6005		sc->qid2tap[qid] = NULL;
6006		free(tap->txa_private, M_DEVBUF);
6007		tap->txa_private = NULL;
6008	}
6009	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
6010}
6011
6012/*
6013 * This function is called by upper layer when an ADDBA response is received
6014 * from another STA.
6015 */
6016static int
6017iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6018    uint8_t tid)
6019{
6020	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
6021	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6022	struct iwn_ops *ops = &sc->ops;
6023	struct iwn_node *wn = (void *)ni;
6024	struct iwn_node_info node;
6025	int error, qid;
6026
6027	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6028
6029	/* Enable TX for the specified RA/TID. */
6030	wn->disable_tid &= ~(1 << tid);
6031	memset(&node, 0, sizeof node);
6032	node.id = wn->id;
6033	node.control = IWN_NODE_UPDATE;
6034	node.flags = IWN_FLAG_SET_DISABLE_TID;
6035	node.disable_tid = htole16(wn->disable_tid);
6036	error = ops->add_node(sc, &node, 1);
6037	if (error != 0)
6038		return 0;
6039
6040	if ((error = iwn_nic_lock(sc)) != 0)
6041		return 0;
6042	qid = *(int *)tap->txa_private;
6043	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
6044	    __func__, wn->id, tid, tap->txa_start, qid);
6045	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
6046	iwn_nic_unlock(sc);
6047
6048	iwn_set_link_quality(sc, ni);
6049	return 1;
6050}
6051
6052static void
6053iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
6054{
6055	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
6056	struct iwn_ops *ops = &sc->ops;
6057	uint8_t tid = tap->txa_tid;
6058	int qid;
6059
6060	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6061
6062	sc->sc_addba_stop(ni, tap);
6063
6064	if (tap->txa_private == NULL)
6065		return;
6066
6067	qid = *(int *)tap->txa_private;
6068	if (sc->txq[qid].queued != 0)
6069		return;
6070	if (iwn_nic_lock(sc) != 0)
6071		return;
6072	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
6073	iwn_nic_unlock(sc);
6074	sc->qid2tap[qid] = NULL;
6075	free(tap->txa_private, M_DEVBUF);
6076	tap->txa_private = NULL;
6077}
6078
6079static void
6080iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6081    int qid, uint8_t tid, uint16_t ssn)
6082{
6083	struct iwn_node *wn = (void *)ni;
6084
6085	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6086
6087	/* Stop TX scheduler while we're changing its configuration. */
6088	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6089	    IWN4965_TXQ_STATUS_CHGACT);
6090
6091	/* Assign RA/TID translation to the queue. */
6092	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
6093	    wn->id << 4 | tid);
6094
6095	/* Enable chain-building mode for the queue. */
6096	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
6097
6098	/* Set starting sequence number from the ADDBA request. */
6099	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
6100	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6101	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6102
6103	/* Set scheduler window size. */
6104	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
6105	    IWN_SCHED_WINSZ);
6106	/* Set scheduler frame limit. */
6107	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6108	    IWN_SCHED_LIMIT << 16);
6109
6110	/* Enable interrupts for the queue. */
6111	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6112
6113	/* Mark the queue as active. */
6114	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6115	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
6116	    iwn_tid2fifo[tid] << 1);
6117}
6118
6119static void
6120iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
6121{
6122	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6123
6124	/* Stop TX scheduler while we're changing its configuration. */
6125	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6126	    IWN4965_TXQ_STATUS_CHGACT);
6127
6128	/* Set starting sequence number from the ADDBA request. */
6129	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6130	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6131
6132	/* Disable interrupts for the queue. */
6133	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6134
6135	/* Mark the queue as inactive. */
6136	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6137	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
6138}
6139
6140static void
6141iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6142    int qid, uint8_t tid, uint16_t ssn)
6143{
6144	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6145
6146	struct iwn_node *wn = (void *)ni;
6147
6148	/* Stop TX scheduler while we're changing its configuration. */
6149	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6150	    IWN5000_TXQ_STATUS_CHGACT);
6151
6152	/* Assign RA/TID translation to the queue. */
6153	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
6154	    wn->id << 4 | tid);
6155
6156	/* Enable chain-building mode for the queue. */
6157	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
6158
6159	/* Enable aggregation for the queue. */
6160	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6161
6162	/* Set starting sequence number from the ADDBA request. */
6163	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
6164	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6165	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6166
6167	/* Set scheduler window size and frame limit. */
6168	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6169	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6170
6171	/* Enable interrupts for the queue. */
6172	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6173
6174	/* Mark the queue as active. */
6175	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6176	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6177}
6178
6179static void
6180iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
6181{
6182	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6183
6184	/* Stop TX scheduler while we're changing its configuration. */
6185	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6186	    IWN5000_TXQ_STATUS_CHGACT);
6187
6188	/* Disable aggregation for the queue. */
6189	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6190
6191	/* Set starting sequence number from the ADDBA request. */
6192	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6193	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6194
6195	/* Disable interrupts for the queue. */
6196	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6197
6198	/* Mark the queue as inactive. */
6199	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6200	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6201}
6202
6203/*
6204 * Query calibration tables from the initialization firmware.  We do this
6205 * only once at first boot.  Called from a process context.
6206 */
6207static int
6208iwn5000_query_calibration(struct iwn_softc *sc)
6209{
6210	struct iwn5000_calib_config cmd;
6211	int error;
6212
6213	memset(&cmd, 0, sizeof cmd);
6214	cmd.ucode.once.enable = 0xffffffff;
6215	cmd.ucode.once.start  = 0xffffffff;
6216	cmd.ucode.once.send   = 0xffffffff;
6217	cmd.ucode.flags       = 0xffffffff;
6218	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
6219	    __func__);
6220	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6221	if (error != 0)
6222		return error;
6223
6224	/* Wait at most two seconds for calibration to complete. */
6225	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6226		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
6227	return error;
6228}
6229
6230/*
6231 * Send calibration results to the runtime firmware.  These results were
6232 * obtained on first boot from the initialization firmware.
6233 */
6234static int
6235iwn5000_send_calibration(struct iwn_softc *sc)
6236{
6237	int idx, error;
6238
6239	for (idx = 0; idx < 5; idx++) {
6240		if (sc->calibcmd[idx].buf == NULL)
6241			continue;	/* No results available. */
6242		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6243		    "send calibration result idx=%d len=%d\n", idx,
6244		    sc->calibcmd[idx].len);
6245		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6246		    sc->calibcmd[idx].len, 0);
6247		if (error != 0) {
6248			device_printf(sc->sc_dev,
6249			    "%s: could not send calibration result, error %d\n",
6250			    __func__, error);
6251			return error;
6252		}
6253	}
6254	return 0;
6255}
6256
6257static int
6258iwn5000_send_wimax_coex(struct iwn_softc *sc)
6259{
6260	struct iwn5000_wimax_coex wimax;
6261
6262#ifdef notyet
6263	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6264		/* Enable WiMAX coexistence for combo adapters. */
6265		wimax.flags =
6266		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6267		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6268		    IWN_WIMAX_COEX_STA_TABLE_VALID |
6269		    IWN_WIMAX_COEX_ENABLE;
6270		memcpy(wimax.events, iwn6050_wimax_events,
6271		    sizeof iwn6050_wimax_events);
6272	} else
6273#endif
6274	{
6275		/* Disable WiMAX coexistence. */
6276		wimax.flags = 0;
6277		memset(wimax.events, 0, sizeof wimax.events);
6278	}
6279	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
6280	    __func__);
6281	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6282}
6283
6284static int
6285iwn5000_crystal_calib(struct iwn_softc *sc)
6286{
6287	struct iwn5000_phy_calib_crystal cmd;
6288
6289	memset(&cmd, 0, sizeof cmd);
6290	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6291	cmd.ngroups = 1;
6292	cmd.isvalid = 1;
6293	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
6294	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
6295	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
6296	    cmd.cap_pin[0], cmd.cap_pin[1]);
6297	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6298}
6299
6300static int
6301iwn5000_temp_offset_calib(struct iwn_softc *sc)
6302{
6303	struct iwn5000_phy_calib_temp_offset cmd;
6304
6305	memset(&cmd, 0, sizeof cmd);
6306	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
6307	cmd.ngroups = 1;
6308	cmd.isvalid = 1;
6309	if (sc->eeprom_temp != 0)
6310		cmd.offset = htole16(sc->eeprom_temp);
6311	else
6312		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6313	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
6314	    le16toh(cmd.offset));
6315	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6316}
6317
6318/*
6319 * This function is called after the runtime firmware notifies us of its
6320 * readiness (called in a process context).
6321 */
6322static int
6323iwn4965_post_alive(struct iwn_softc *sc)
6324{
6325	int error, qid;
6326
6327	if ((error = iwn_nic_lock(sc)) != 0)
6328		return error;
6329
6330	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6331
6332	/* Clear TX scheduler state in SRAM. */
6333	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6334	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6335	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6336
6337	/* Set physical address of TX scheduler rings (1KB aligned). */
6338	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6339
6340	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6341
6342	/* Disable chain mode for all our 16 queues. */
6343	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6344
6345	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6346		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6347		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6348
6349		/* Set scheduler window size. */
6350		iwn_mem_write(sc, sc->sched_base +
6351		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6352		/* Set scheduler frame limit. */
6353		iwn_mem_write(sc, sc->sched_base +
6354		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6355		    IWN_SCHED_LIMIT << 16);
6356	}
6357
6358	/* Enable interrupts for all our 16 queues. */
6359	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6360	/* Identify TX FIFO rings (0-7). */
6361	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6362
6363	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6364	for (qid = 0; qid < 7; qid++) {
6365		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6366		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6367		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6368	}
6369	iwn_nic_unlock(sc);
6370	return 0;
6371}
6372
6373/*
6374 * This function is called after the initialization or runtime firmware
6375 * notifies us of its readiness (called in a process context).
6376 */
6377static int
6378iwn5000_post_alive(struct iwn_softc *sc)
6379{
6380	int error, qid;
6381
6382	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6383
6384	/* Switch to using ICT interrupt mode. */
6385	iwn5000_ict_reset(sc);
6386
6387	if ((error = iwn_nic_lock(sc)) != 0){
6388		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
6389		return error;
6390	}
6391
6392	/* Clear TX scheduler state in SRAM. */
6393	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6394	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6395	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6396
6397	/* Set physical address of TX scheduler rings (1KB aligned). */
6398	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6399
6400	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6401
6402	/* Enable chain mode for all queues, except command queue. */
6403	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6404	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6405
6406	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6407		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6408		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6409
6410		iwn_mem_write(sc, sc->sched_base +
6411		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6412		/* Set scheduler window size and frame limit. */
6413		iwn_mem_write(sc, sc->sched_base +
6414		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6415		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6416	}
6417
6418	/* Enable interrupts for all our 20 queues. */
6419	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6420	/* Identify TX FIFO rings (0-7). */
6421	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6422
6423	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6424	for (qid = 0; qid < 7; qid++) {
6425		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6426		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6427		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6428	}
6429	iwn_nic_unlock(sc);
6430
6431	/* Configure WiMAX coexistence for combo adapters. */
6432	error = iwn5000_send_wimax_coex(sc);
6433	if (error != 0) {
6434		device_printf(sc->sc_dev,
6435		    "%s: could not configure WiMAX coexistence, error %d\n",
6436		    __func__, error);
6437		return error;
6438	}
6439	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6440		/* Perform crystal calibration. */
6441		error = iwn5000_crystal_calib(sc);
6442		if (error != 0) {
6443			device_printf(sc->sc_dev,
6444			    "%s: crystal calibration failed, error %d\n",
6445			    __func__, error);
6446			return error;
6447		}
6448	}
6449	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6450		/* Query calibration from the initialization firmware. */
6451		if ((error = iwn5000_query_calibration(sc)) != 0) {
6452			device_printf(sc->sc_dev,
6453			    "%s: could not query calibration, error %d\n",
6454			    __func__, error);
6455			return error;
6456		}
6457		/*
6458		 * We have the calibration results now, reboot with the
6459		 * runtime firmware (call ourselves recursively!)
6460		 */
6461		iwn_hw_stop(sc);
6462		error = iwn_hw_init(sc);
6463	} else {
6464		/* Send calibration results to runtime firmware. */
6465		error = iwn5000_send_calibration(sc);
6466	}
6467
6468	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6469
6470	return error;
6471}
6472
6473/*
6474 * The firmware boot code is small and is intended to be copied directly into
6475 * the NIC internal memory (no DMA transfer).
6476 */
6477static int
6478iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6479{
6480	int error, ntries;
6481
6482	size /= sizeof (uint32_t);
6483
6484	if ((error = iwn_nic_lock(sc)) != 0)
6485		return error;
6486
6487	/* Copy microcode image into NIC memory. */
6488	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6489	    (const uint32_t *)ucode, size);
6490
6491	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6492	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6493	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6494
6495	/* Start boot load now. */
6496	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6497
6498	/* Wait for transfer to complete. */
6499	for (ntries = 0; ntries < 1000; ntries++) {
6500		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6501		    IWN_BSM_WR_CTRL_START))
6502			break;
6503		DELAY(10);
6504	}
6505	if (ntries == 1000) {
6506		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6507		    __func__);
6508		iwn_nic_unlock(sc);
6509		return ETIMEDOUT;
6510	}
6511
6512	/* Enable boot after power up. */
6513	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6514
6515	iwn_nic_unlock(sc);
6516	return 0;
6517}
6518
6519static int
6520iwn4965_load_firmware(struct iwn_softc *sc)
6521{
6522	struct iwn_fw_info *fw = &sc->fw;
6523	struct iwn_dma_info *dma = &sc->fw_dma;
6524	int error;
6525
6526	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6527	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6528	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6529	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6530	    fw->init.text, fw->init.textsz);
6531	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6532
6533	/* Tell adapter where to find initialization sections. */
6534	if ((error = iwn_nic_lock(sc)) != 0)
6535		return error;
6536	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6537	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6538	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6539	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6540	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6541	iwn_nic_unlock(sc);
6542
6543	/* Load firmware boot code. */
6544	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6545	if (error != 0) {
6546		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6547		    __func__);
6548		return error;
6549	}
6550	/* Now press "execute". */
6551	IWN_WRITE(sc, IWN_RESET, 0);
6552
6553	/* Wait at most one second for first alive notification. */
6554	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6555		device_printf(sc->sc_dev,
6556		    "%s: timeout waiting for adapter to initialize, error %d\n",
6557		    __func__, error);
6558		return error;
6559	}
6560
6561	/* Retrieve current temperature for initial TX power calibration. */
6562	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6563	sc->temp = iwn4965_get_temperature(sc);
6564
6565	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6566	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6567	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6568	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6569	    fw->main.text, fw->main.textsz);
6570	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6571
6572	/* Tell adapter where to find runtime sections. */
6573	if ((error = iwn_nic_lock(sc)) != 0)
6574		return error;
6575	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6576	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6577	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6578	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6579	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6580	    IWN_FW_UPDATED | fw->main.textsz);
6581	iwn_nic_unlock(sc);
6582
6583	return 0;
6584}
6585
6586static int
6587iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6588    const uint8_t *section, int size)
6589{
6590	struct iwn_dma_info *dma = &sc->fw_dma;
6591	int error;
6592
6593	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6594
6595	/* Copy firmware section into pre-allocated DMA-safe memory. */
6596	memcpy(dma->vaddr, section, size);
6597	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6598
6599	if ((error = iwn_nic_lock(sc)) != 0)
6600		return error;
6601
6602	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6603	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6604
6605	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6606	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6607	    IWN_LOADDR(dma->paddr));
6608	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6609	    IWN_HIADDR(dma->paddr) << 28 | size);
6610	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6611	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6612	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6613	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6614
6615	/* Kick Flow Handler to start DMA transfer. */
6616	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6617	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6618
6619	iwn_nic_unlock(sc);
6620
6621	/* Wait at most five seconds for FH DMA transfer to complete. */
6622	return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
6623}
6624
6625static int
6626iwn5000_load_firmware(struct iwn_softc *sc)
6627{
6628	struct iwn_fw_part *fw;
6629	int error;
6630
6631	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6632
6633	/* Load the initialization firmware on first boot only. */
6634	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6635	    &sc->fw.main : &sc->fw.init;
6636
6637	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6638	    fw->text, fw->textsz);
6639	if (error != 0) {
6640		device_printf(sc->sc_dev,
6641		    "%s: could not load firmware %s section, error %d\n",
6642		    __func__, ".text", error);
6643		return error;
6644	}
6645	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6646	    fw->data, fw->datasz);
6647	if (error != 0) {
6648		device_printf(sc->sc_dev,
6649		    "%s: could not load firmware %s section, error %d\n",
6650		    __func__, ".data", error);
6651		return error;
6652	}
6653
6654	/* Now press "execute". */
6655	IWN_WRITE(sc, IWN_RESET, 0);
6656	return 0;
6657}
6658
6659/*
6660 * Extract text and data sections from a legacy firmware image.
6661 */
6662static int
6663iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6664{
6665	const uint32_t *ptr;
6666	size_t hdrlen = 24;
6667	uint32_t rev;
6668
6669	ptr = (const uint32_t *)fw->data;
6670	rev = le32toh(*ptr++);
6671
6672	/* Check firmware API version. */
6673	if (IWN_FW_API(rev) <= 1) {
6674		device_printf(sc->sc_dev,
6675		    "%s: bad firmware, need API version >=2\n", __func__);
6676		return EINVAL;
6677	}
6678	if (IWN_FW_API(rev) >= 3) {
6679		/* Skip build number (version 2 header). */
6680		hdrlen += 4;
6681		ptr++;
6682	}
6683	if (fw->size < hdrlen) {
6684		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6685		    __func__, fw->size);
6686		return EINVAL;
6687	}
6688	fw->main.textsz = le32toh(*ptr++);
6689	fw->main.datasz = le32toh(*ptr++);
6690	fw->init.textsz = le32toh(*ptr++);
6691	fw->init.datasz = le32toh(*ptr++);
6692	fw->boot.textsz = le32toh(*ptr++);
6693
6694	/* Check that all firmware sections fit. */
6695	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6696	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6697		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6698		    __func__, fw->size);
6699		return EINVAL;
6700	}
6701
6702	/* Get pointers to firmware sections. */
6703	fw->main.text = (const uint8_t *)ptr;
6704	fw->main.data = fw->main.text + fw->main.textsz;
6705	fw->init.text = fw->main.data + fw->main.datasz;
6706	fw->init.data = fw->init.text + fw->init.textsz;
6707	fw->boot.text = fw->init.data + fw->init.datasz;
6708	return 0;
6709}
6710
6711/*
6712 * Extract text and data sections from a TLV firmware image.
6713 */
6714static int
6715iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6716    uint16_t alt)
6717{
6718	const struct iwn_fw_tlv_hdr *hdr;
6719	const struct iwn_fw_tlv *tlv;
6720	const uint8_t *ptr, *end;
6721	uint64_t altmask;
6722	uint32_t len, tmp;
6723
6724	if (fw->size < sizeof (*hdr)) {
6725		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6726		    __func__, fw->size);
6727		return EINVAL;
6728	}
6729	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6730	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6731		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
6732		    __func__, le32toh(hdr->signature));
6733		return EINVAL;
6734	}
6735	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
6736	    le32toh(hdr->build));
6737
6738	/*
6739	 * Select the closest supported alternative that is less than
6740	 * or equal to the specified one.
6741	 */
6742	altmask = le64toh(hdr->altmask);
6743	while (alt > 0 && !(altmask & (1ULL << alt)))
6744		alt--;	/* Downgrade. */
6745	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
6746
6747	ptr = (const uint8_t *)(hdr + 1);
6748	end = (const uint8_t *)(fw->data + fw->size);
6749
6750	/* Parse type-length-value fields. */
6751	while (ptr + sizeof (*tlv) <= end) {
6752		tlv = (const struct iwn_fw_tlv *)ptr;
6753		len = le32toh(tlv->len);
6754
6755		ptr += sizeof (*tlv);
6756		if (ptr + len > end) {
6757			device_printf(sc->sc_dev,
6758			    "%s: firmware too short: %zu bytes\n", __func__,
6759			    fw->size);
6760			return EINVAL;
6761		}
6762		/* Skip other alternatives. */
6763		if (tlv->alt != 0 && tlv->alt != htole16(alt))
6764			goto next;
6765
6766		switch (le16toh(tlv->type)) {
6767		case IWN_FW_TLV_MAIN_TEXT:
6768			fw->main.text = ptr;
6769			fw->main.textsz = len;
6770			break;
6771		case IWN_FW_TLV_MAIN_DATA:
6772			fw->main.data = ptr;
6773			fw->main.datasz = len;
6774			break;
6775		case IWN_FW_TLV_INIT_TEXT:
6776			fw->init.text = ptr;
6777			fw->init.textsz = len;
6778			break;
6779		case IWN_FW_TLV_INIT_DATA:
6780			fw->init.data = ptr;
6781			fw->init.datasz = len;
6782			break;
6783		case IWN_FW_TLV_BOOT_TEXT:
6784			fw->boot.text = ptr;
6785			fw->boot.textsz = len;
6786			break;
6787		case IWN_FW_TLV_ENH_SENS:
6788			if (!len)
6789				sc->sc_flags |= IWN_FLAG_ENH_SENS;
6790			break;
6791		case IWN_FW_TLV_PHY_CALIB:
6792			tmp = htole32(*ptr);
6793			if (tmp < 253) {
6794				sc->reset_noise_gain = tmp;
6795				sc->noise_gain = tmp + 1;
6796			}
6797			break;
6798		case IWN_FW_TLV_PAN:
6799			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
6800			DPRINTF(sc, IWN_DEBUG_RESET,
6801			    "PAN Support found: %d\n", 1);
6802			break;
6803		case IWN_FW_TLV_FLAGS :
6804			sc->tlv_feature_flags = htole32(*ptr);
6805			break;
6806		case IWN_FW_TLV_PBREQ_MAXLEN:
6807		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
6808		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
6809		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
6810		case IWN_FW_TLV_INIT_EVTLOG_PTR:
6811		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
6812		case IWN_FW_TLV_INIT_ERRLOG_PTR:
6813		case IWN_FW_TLV_WOWLAN_INST:
6814		case IWN_FW_TLV_WOWLAN_DATA:
6815			DPRINTF(sc, IWN_DEBUG_RESET,
6816			    "TLV type %d recognized but not handled\n",
6817			    le16toh(tlv->type));
6818			break;
6819		default:
6820			DPRINTF(sc, IWN_DEBUG_RESET,
6821			    "TLV type %d not handled\n", le16toh(tlv->type));
6822			break;
6823		}
6824 next:		/* TLV fields are 32-bit aligned. */
6825		ptr += (len + 3) & ~3;
6826	}
6827	return 0;
6828}
6829
6830static int
6831iwn_read_firmware(struct iwn_softc *sc)
6832{
6833	struct iwn_fw_info *fw = &sc->fw;
6834	int error;
6835
6836	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6837
6838	IWN_UNLOCK(sc);
6839
6840	memset(fw, 0, sizeof (*fw));
6841
6842	/* Read firmware image from filesystem. */
6843	sc->fw_fp = firmware_get(sc->fwname);
6844	if (sc->fw_fp == NULL) {
6845		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6846		    __func__, sc->fwname);
6847		IWN_LOCK(sc);
6848		return EINVAL;
6849	}
6850	IWN_LOCK(sc);
6851
6852	fw->size = sc->fw_fp->datasize;
6853	fw->data = (const uint8_t *)sc->fw_fp->data;
6854	if (fw->size < sizeof (uint32_t)) {
6855		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6856		    __func__, fw->size);
6857		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6858		sc->fw_fp = NULL;
6859		return EINVAL;
6860	}
6861
6862	/* Retrieve text and data sections. */
6863	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
6864		error = iwn_read_firmware_leg(sc, fw);
6865	else
6866		error = iwn_read_firmware_tlv(sc, fw, 1);
6867	if (error != 0) {
6868		device_printf(sc->sc_dev,
6869		    "%s: could not read firmware sections, error %d\n",
6870		    __func__, error);
6871		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6872		sc->fw_fp = NULL;
6873		return error;
6874	}
6875
6876	/* Make sure text and data sections fit in hardware memory. */
6877	if (fw->main.textsz > sc->fw_text_maxsz ||
6878	    fw->main.datasz > sc->fw_data_maxsz ||
6879	    fw->init.textsz > sc->fw_text_maxsz ||
6880	    fw->init.datasz > sc->fw_data_maxsz ||
6881	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6882	    (fw->boot.textsz & 3) != 0) {
6883		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6884		    __func__);
6885		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6886		sc->fw_fp = NULL;
6887		return EINVAL;
6888	}
6889
6890	/* We can proceed with loading the firmware. */
6891	return 0;
6892}
6893
6894static int
6895iwn_clock_wait(struct iwn_softc *sc)
6896{
6897	int ntries;
6898
6899	/* Set "initialization complete" bit. */
6900	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6901
6902	/* Wait for clock stabilization. */
6903	for (ntries = 0; ntries < 2500; ntries++) {
6904		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6905			return 0;
6906		DELAY(10);
6907	}
6908	device_printf(sc->sc_dev,
6909	    "%s: timeout waiting for clock stabilization\n", __func__);
6910	return ETIMEDOUT;
6911}
6912
6913static int
6914iwn_apm_init(struct iwn_softc *sc)
6915{
6916	uint32_t reg;
6917	int error;
6918
6919	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6920
6921	/* Disable L0s exit timer (NMI bug workaround). */
6922	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6923	/* Don't wait for ICH L0s (ICH bug workaround). */
6924	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6925
6926	/* Set FH wait threshold to max (HW bug under stress workaround). */
6927	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6928
6929	/* Enable HAP INTA to move adapter from L1a to L0s. */
6930	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6931
6932	/* Retrieve PCIe Active State Power Management (ASPM). */
6933	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6934	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6935	if (reg & 0x02)	/* L1 Entry enabled. */
6936		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6937	else
6938		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6939
6940	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6941	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
6942		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6943
6944	/* Wait for clock stabilization before accessing prph. */
6945	if ((error = iwn_clock_wait(sc)) != 0)
6946		return error;
6947
6948	if ((error = iwn_nic_lock(sc)) != 0)
6949		return error;
6950	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6951		/* Enable DMA and BSM (Bootstrap State Machine). */
6952		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6953		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6954		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6955	} else {
6956		/* Enable DMA. */
6957		iwn_prph_write(sc, IWN_APMG_CLK_EN,
6958		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6959	}
6960	DELAY(20);
6961	/* Disable L1-Active. */
6962	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6963	iwn_nic_unlock(sc);
6964
6965	return 0;
6966}
6967
6968static void
6969iwn_apm_stop_master(struct iwn_softc *sc)
6970{
6971	int ntries;
6972
6973	/* Stop busmaster DMA activity. */
6974	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6975	for (ntries = 0; ntries < 100; ntries++) {
6976		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6977			return;
6978		DELAY(10);
6979	}
6980	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6981}
6982
6983static void
6984iwn_apm_stop(struct iwn_softc *sc)
6985{
6986	iwn_apm_stop_master(sc);
6987
6988	/* Reset the entire device. */
6989	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6990	DELAY(10);
6991	/* Clear "initialization complete" bit. */
6992	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6993}
6994
6995static int
6996iwn4965_nic_config(struct iwn_softc *sc)
6997{
6998	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
6999
7000	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
7001		/*
7002		 * I don't believe this to be correct but this is what the
7003		 * vendor driver is doing. Probably the bits should not be
7004		 * shifted in IWN_RFCFG_*.
7005		 */
7006		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7007		    IWN_RFCFG_TYPE(sc->rfcfg) |
7008		    IWN_RFCFG_STEP(sc->rfcfg) |
7009		    IWN_RFCFG_DASH(sc->rfcfg));
7010	}
7011	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7012	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7013	return 0;
7014}
7015
7016static int
7017iwn5000_nic_config(struct iwn_softc *sc)
7018{
7019	uint32_t tmp;
7020	int error;
7021
7022	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7023
7024	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
7025		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7026		    IWN_RFCFG_TYPE(sc->rfcfg) |
7027		    IWN_RFCFG_STEP(sc->rfcfg) |
7028		    IWN_RFCFG_DASH(sc->rfcfg));
7029	}
7030	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7031	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7032
7033	if ((error = iwn_nic_lock(sc)) != 0)
7034		return error;
7035	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
7036
7037	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
7038		/*
7039		 * Select first Switching Voltage Regulator (1.32V) to
7040		 * solve a stability issue related to noisy DC2DC line
7041		 * in the silicon of 1000 Series.
7042		 */
7043		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
7044		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
7045		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
7046		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
7047	}
7048	iwn_nic_unlock(sc);
7049
7050	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
7051		/* Use internal power amplifier only. */
7052		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
7053	}
7054	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
7055	     sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
7056		/* Indicate that ROM calibration version is >=6. */
7057		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
7058	}
7059	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
7060		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
7061	return 0;
7062}
7063
7064/*
7065 * Take NIC ownership over Intel Active Management Technology (AMT).
7066 */
7067static int
7068iwn_hw_prepare(struct iwn_softc *sc)
7069{
7070	int ntries;
7071
7072	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7073
7074	/* Check if hardware is ready. */
7075	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7076	for (ntries = 0; ntries < 5; ntries++) {
7077		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7078		    IWN_HW_IF_CONFIG_NIC_READY)
7079			return 0;
7080		DELAY(10);
7081	}
7082
7083	/* Hardware not ready, force into ready state. */
7084	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
7085	for (ntries = 0; ntries < 15000; ntries++) {
7086		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
7087		    IWN_HW_IF_CONFIG_PREPARE_DONE))
7088			break;
7089		DELAY(10);
7090	}
7091	if (ntries == 15000)
7092		return ETIMEDOUT;
7093
7094	/* Hardware should be ready now. */
7095	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7096	for (ntries = 0; ntries < 5; ntries++) {
7097		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7098		    IWN_HW_IF_CONFIG_NIC_READY)
7099			return 0;
7100		DELAY(10);
7101	}
7102	return ETIMEDOUT;
7103}
7104
7105static int
7106iwn_hw_init(struct iwn_softc *sc)
7107{
7108	struct iwn_ops *ops = &sc->ops;
7109	int error, chnl, qid;
7110
7111	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7112
7113	/* Clear pending interrupts. */
7114	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7115
7116	if ((error = iwn_apm_init(sc)) != 0) {
7117		device_printf(sc->sc_dev,
7118		    "%s: could not power ON adapter, error %d\n", __func__,
7119		    error);
7120		return error;
7121	}
7122
7123	/* Select VMAIN power source. */
7124	if ((error = iwn_nic_lock(sc)) != 0)
7125		return error;
7126	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
7127	iwn_nic_unlock(sc);
7128
7129	/* Perform adapter-specific initialization. */
7130	if ((error = ops->nic_config(sc)) != 0)
7131		return error;
7132
7133	/* Initialize RX ring. */
7134	if ((error = iwn_nic_lock(sc)) != 0)
7135		return error;
7136	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
7137	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
7138	/* Set physical address of RX ring (256-byte aligned). */
7139	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
7140	/* Set physical address of RX status (16-byte aligned). */
7141	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
7142	/* Enable RX. */
7143	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
7144	    IWN_FH_RX_CONFIG_ENA           |
7145	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
7146	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
7147	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
7148	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
7149	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
7150	iwn_nic_unlock(sc);
7151	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
7152
7153	if ((error = iwn_nic_lock(sc)) != 0)
7154		return error;
7155
7156	/* Initialize TX scheduler. */
7157	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7158
7159	/* Set physical address of "keep warm" page (16-byte aligned). */
7160	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
7161
7162	/* Initialize TX rings. */
7163	for (qid = 0; qid < sc->ntxqs; qid++) {
7164		struct iwn_tx_ring *txq = &sc->txq[qid];
7165
7166		/* Set physical address of TX ring (256-byte aligned). */
7167		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
7168		    txq->desc_dma.paddr >> 8);
7169	}
7170	iwn_nic_unlock(sc);
7171
7172	/* Enable DMA channels. */
7173	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7174		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
7175		    IWN_FH_TX_CONFIG_DMA_ENA |
7176		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
7177	}
7178
7179	/* Clear "radio off" and "commands blocked" bits. */
7180	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7181	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7182
7183	/* Clear pending interrupts. */
7184	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7185	/* Enable interrupt coalescing. */
7186	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
7187	/* Enable interrupts. */
7188	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7189
7190	/* _Really_ make sure "radio off" bit is cleared! */
7191	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7192	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7193
7194	/* Enable shadow registers. */
7195	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7196		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7197
7198	if ((error = ops->load_firmware(sc)) != 0) {
7199		device_printf(sc->sc_dev,
7200		    "%s: could not load firmware, error %d\n", __func__,
7201		    error);
7202		return error;
7203	}
7204	/* Wait at most one second for firmware alive notification. */
7205	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
7206		device_printf(sc->sc_dev,
7207		    "%s: timeout waiting for adapter to initialize, error %d\n",
7208		    __func__, error);
7209		return error;
7210	}
7211	/* Do post-firmware initialization. */
7212
7213	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7214
7215	return ops->post_alive(sc);
7216}
7217
7218static void
7219iwn_hw_stop(struct iwn_softc *sc)
7220{
7221	int chnl, qid, ntries;
7222
7223	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7224
7225	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7226
7227	/* Disable interrupts. */
7228	IWN_WRITE(sc, IWN_INT_MASK, 0);
7229	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7230	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7231	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7232
7233	/* Make sure we no longer hold the NIC lock. */
7234	iwn_nic_unlock(sc);
7235
7236	/* Stop TX scheduler. */
7237	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7238
7239	/* Stop all DMA channels. */
7240	if (iwn_nic_lock(sc) == 0) {
7241		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7242			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7243			for (ntries = 0; ntries < 200; ntries++) {
7244				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7245				    IWN_FH_TX_STATUS_IDLE(chnl))
7246					break;
7247				DELAY(10);
7248			}
7249		}
7250		iwn_nic_unlock(sc);
7251	}
7252
7253	/* Stop RX ring. */
7254	iwn_reset_rx_ring(sc, &sc->rxq);
7255
7256	/* Reset all TX rings. */
7257	for (qid = 0; qid < sc->ntxqs; qid++)
7258		iwn_reset_tx_ring(sc, &sc->txq[qid]);
7259
7260	if (iwn_nic_lock(sc) == 0) {
7261		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7262		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7263		iwn_nic_unlock(sc);
7264	}
7265	DELAY(5);
7266	/* Power OFF adapter. */
7267	iwn_apm_stop(sc);
7268}
7269
7270static void
7271iwn_radio_on(void *arg0, int pending)
7272{
7273	struct iwn_softc *sc = arg0;
7274	struct ifnet *ifp = sc->sc_ifp;
7275	struct ieee80211com *ic = ifp->if_l2com;
7276	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7277
7278	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7279
7280	if (vap != NULL) {
7281		iwn_init(sc);
7282		ieee80211_init(vap);
7283	}
7284}
7285
7286static void
7287iwn_radio_off(void *arg0, int pending)
7288{
7289	struct iwn_softc *sc = arg0;
7290	struct ifnet *ifp = sc->sc_ifp;
7291	struct ieee80211com *ic = ifp->if_l2com;
7292	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7293
7294	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7295
7296	iwn_stop(sc);
7297	if (vap != NULL)
7298		ieee80211_stop(vap);
7299
7300	/* Enable interrupts to get RF toggle notification. */
7301	IWN_LOCK(sc);
7302	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7303	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7304	IWN_UNLOCK(sc);
7305}
7306
7307static void
7308iwn_init_locked(struct iwn_softc *sc)
7309{
7310	struct ifnet *ifp = sc->sc_ifp;
7311	int error;
7312
7313	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7314
7315	IWN_LOCK_ASSERT(sc);
7316
7317	if ((error = iwn_hw_prepare(sc)) != 0) {
7318		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
7319		    __func__, error);
7320		goto fail;
7321	}
7322
7323	/* Initialize interrupt mask to default value. */
7324	sc->int_mask = IWN_INT_MASK_DEF;
7325	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7326
7327	/* Check that the radio is not disabled by hardware switch. */
7328	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7329		device_printf(sc->sc_dev,
7330		    "radio is disabled by hardware switch\n");
7331		/* Enable interrupts to get RF toggle notifications. */
7332		IWN_WRITE(sc, IWN_INT, 0xffffffff);
7333		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7334		return;
7335	}
7336
7337	/* Read firmware images from the filesystem. */
7338	if ((error = iwn_read_firmware(sc)) != 0) {
7339		device_printf(sc->sc_dev,
7340		    "%s: could not read firmware, error %d\n", __func__,
7341		    error);
7342		goto fail;
7343	}
7344
7345	/* Initialize hardware and upload firmware. */
7346	error = iwn_hw_init(sc);
7347	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
7348	sc->fw_fp = NULL;
7349	if (error != 0) {
7350		device_printf(sc->sc_dev,
7351		    "%s: could not initialize hardware, error %d\n", __func__,
7352		    error);
7353		goto fail;
7354	}
7355
7356	/* Configure adapter now that it is ready. */
7357	if ((error = iwn_config(sc)) != 0) {
7358		device_printf(sc->sc_dev,
7359		    "%s: could not configure device, error %d\n", __func__,
7360		    error);
7361		goto fail;
7362	}
7363
7364	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
7365	ifp->if_drv_flags |= IFF_DRV_RUNNING;
7366
7367	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
7368
7369	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7370
7371	return;
7372
7373fail:	iwn_stop_locked(sc);
7374	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
7375}
7376
7377static void
7378iwn_init(void *arg)
7379{
7380	struct iwn_softc *sc = arg;
7381	struct ifnet *ifp = sc->sc_ifp;
7382	struct ieee80211com *ic = ifp->if_l2com;
7383
7384	IWN_LOCK(sc);
7385	iwn_init_locked(sc);
7386	IWN_UNLOCK(sc);
7387
7388	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
7389		ieee80211_start_all(ic);
7390}
7391
7392static void
7393iwn_stop_locked(struct iwn_softc *sc)
7394{
7395	struct ifnet *ifp = sc->sc_ifp;
7396
7397	IWN_LOCK_ASSERT(sc);
7398
7399	sc->sc_tx_timer = 0;
7400	callout_stop(&sc->watchdog_to);
7401	callout_stop(&sc->calib_to);
7402	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7403
7404	/* Power OFF hardware. */
7405	iwn_hw_stop(sc);
7406}
7407
7408static void
7409iwn_stop(struct iwn_softc *sc)
7410{
7411	IWN_LOCK(sc);
7412	iwn_stop_locked(sc);
7413	IWN_UNLOCK(sc);
7414}
7415
7416/*
7417 * Callback from net80211 to start a scan.
7418 */
7419static void
7420iwn_scan_start(struct ieee80211com *ic)
7421{
7422	struct ifnet *ifp = ic->ic_ifp;
7423	struct iwn_softc *sc = ifp->if_softc;
7424
7425	IWN_LOCK(sc);
7426	/* make the link LED blink while we're scanning */
7427	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
7428	IWN_UNLOCK(sc);
7429}
7430
7431/*
7432 * Callback from net80211 to terminate a scan.
7433 */
7434static void
7435iwn_scan_end(struct ieee80211com *ic)
7436{
7437	struct ifnet *ifp = ic->ic_ifp;
7438	struct iwn_softc *sc = ifp->if_softc;
7439	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7440
7441	IWN_LOCK(sc);
7442	if (vap->iv_state == IEEE80211_S_RUN) {
7443		/* Set link LED to ON status if we are associated */
7444		iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7445	}
7446	IWN_UNLOCK(sc);
7447}
7448
7449/*
7450 * Callback from net80211 to force a channel change.
7451 */
7452static void
7453iwn_set_channel(struct ieee80211com *ic)
7454{
7455	const struct ieee80211_channel *c = ic->ic_curchan;
7456	struct ifnet *ifp = ic->ic_ifp;
7457	struct iwn_softc *sc = ifp->if_softc;
7458	int error;
7459
7460	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7461
7462	IWN_LOCK(sc);
7463	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
7464	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
7465	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
7466	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
7467
7468	/*
7469	 * Only need to set the channel in Monitor mode. AP scanning and auth
7470	 * are already taken care of by their respective firmware commands.
7471	 */
7472	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7473		error = iwn_config(sc);
7474		if (error != 0)
7475		device_printf(sc->sc_dev,
7476		    "%s: error %d settting channel\n", __func__, error);
7477	}
7478	IWN_UNLOCK(sc);
7479}
7480
7481/*
7482 * Callback from net80211 to start scanning of the current channel.
7483 */
7484static void
7485iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
7486{
7487	struct ieee80211vap *vap = ss->ss_vap;
7488	struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
7489	int error;
7490
7491	IWN_LOCK(sc);
7492	error = iwn_scan(sc);
7493	IWN_UNLOCK(sc);
7494	if (error != 0)
7495		ieee80211_cancel_scan(vap);
7496}
7497
7498/*
7499 * Callback from net80211 to handle the minimum dwell time being met.
7500 * The intent is to terminate the scan but we just let the firmware
7501 * notify us when it's finished as we have no safe way to abort it.
7502 */
7503static void
7504iwn_scan_mindwell(struct ieee80211_scan_state *ss)
7505{
7506	/* NB: don't try to abort scan; wait for firmware to finish */
7507}
7508
7509static void
7510iwn_hw_reset(void *arg0, int pending)
7511{
7512	struct iwn_softc *sc = arg0;
7513	struct ifnet *ifp = sc->sc_ifp;
7514	struct ieee80211com *ic = ifp->if_l2com;
7515
7516	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7517
7518	iwn_stop(sc);
7519	iwn_init(sc);
7520	ieee80211_notify_radio(ic, 1);
7521}
7522#ifdef	IWN_DEBUG
7523#define	IWN_DESC(x) case x:	return #x
7524#define	COUNTOF(array) (sizeof(array) / sizeof(array[0]))
7525
7526/*
7527 * Translate CSR code to string
7528 */
7529static char *iwn_get_csr_string(int csr)
7530{
7531	switch (csr) {
7532		IWN_DESC(IWN_HW_IF_CONFIG);
7533		IWN_DESC(IWN_INT_COALESCING);
7534		IWN_DESC(IWN_INT);
7535		IWN_DESC(IWN_INT_MASK);
7536		IWN_DESC(IWN_FH_INT);
7537		IWN_DESC(IWN_GPIO_IN);
7538		IWN_DESC(IWN_RESET);
7539		IWN_DESC(IWN_GP_CNTRL);
7540		IWN_DESC(IWN_HW_REV);
7541		IWN_DESC(IWN_EEPROM);
7542		IWN_DESC(IWN_EEPROM_GP);
7543		IWN_DESC(IWN_OTP_GP);
7544		IWN_DESC(IWN_GIO);
7545		IWN_DESC(IWN_GP_UCODE);
7546		IWN_DESC(IWN_GP_DRIVER);
7547		IWN_DESC(IWN_UCODE_GP1);
7548		IWN_DESC(IWN_UCODE_GP2);
7549		IWN_DESC(IWN_LED);
7550		IWN_DESC(IWN_DRAM_INT_TBL);
7551		IWN_DESC(IWN_GIO_CHICKEN);
7552		IWN_DESC(IWN_ANA_PLL);
7553		IWN_DESC(IWN_HW_REV_WA);
7554		IWN_DESC(IWN_DBG_HPET_MEM);
7555	default:
7556		return "UNKNOWN CSR";
7557	}
7558}
7559
7560/*
7561 * This function print firmware register
7562 */
7563static void
7564iwn_debug_register(struct iwn_softc *sc)
7565{
7566	int i;
7567	static const uint32_t csr_tbl[] = {
7568		IWN_HW_IF_CONFIG,
7569		IWN_INT_COALESCING,
7570		IWN_INT,
7571		IWN_INT_MASK,
7572		IWN_FH_INT,
7573		IWN_GPIO_IN,
7574		IWN_RESET,
7575		IWN_GP_CNTRL,
7576		IWN_HW_REV,
7577		IWN_EEPROM,
7578		IWN_EEPROM_GP,
7579		IWN_OTP_GP,
7580		IWN_GIO,
7581		IWN_GP_UCODE,
7582		IWN_GP_DRIVER,
7583		IWN_UCODE_GP1,
7584		IWN_UCODE_GP2,
7585		IWN_LED,
7586		IWN_DRAM_INT_TBL,
7587		IWN_GIO_CHICKEN,
7588		IWN_ANA_PLL,
7589		IWN_HW_REV_WA,
7590		IWN_DBG_HPET_MEM,
7591	};
7592	DPRINTF(sc, IWN_DEBUG_REGISTER,
7593	    "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
7594	    "\n");
7595	for (i = 0; i <  COUNTOF(csr_tbl); i++){
7596		DPRINTF(sc, IWN_DEBUG_REGISTER,"  %10s: 0x%08x ",
7597			iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
7598		if ((i+1) % 3 == 0)
7599			DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
7600	}
7601	DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
7602}
7603#endif
7604