xref: /illumos-gate/usr/src/uts/common/io/iwn/if_iwn.c (revision e5b103bb)
1 /*	$NetBSD: if_iwn.c,v 1.78 2016/06/10 13:27:14 ozaki-r Exp $	*/
2 /*	$OpenBSD: if_iwn.c,v 1.135 2014/09/10 07:22:09 dcoppa Exp $	*/
3 
4 /*-
5  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
22  */
23 
24 /*
25  * Driver for Intel WiFi Link 4965 and 100/1000/2000/5000/6000 Series 802.11
26  * network adapters.
27  */
28 
29 /*
30  * TODO:
31  * - turn tunables into driver properties
32  */
33 
34 #undef IWN_HWCRYPTO	/* XXX does not even compile yet */
35 
36 #include <sys/modctl.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/stat.h>
40 
41 #include <sys/param.h>
42 #include <sys/sockio.h>
43 #include <sys/proc.h>
44 #include <sys/socket.h>
45 #include <sys/systm.h>
46 #include <sys/mutex.h>
47 #include <sys/conf.h>
48 
49 #include <sys/pci.h>
50 #include <sys/pcie.h>
51 
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56 
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/in_var.h>
60 #include <netinet/ip.h>
61 
62 #include <sys/dlpi.h>
63 #include <sys/mac_provider.h>
64 #include <sys/mac_wifi.h>
65 #include <sys/net80211.h>
66 #include <sys/firmload.h>
67 #include <sys/queue.h>
68 #include <sys/strsun.h>
69 #include <sys/strsubr.h>
70 #include <sys/sysmacros.h>
71 #include <sys/types.h>
72 #include <sys/kstat.h>
73 
74 #include <sys/sdt.h>
75 
76 #include "if_iwncompat.h"
77 #include "if_iwnreg.h"
78 #include "if_iwnvar.h"
79 #include <inet/wifi_ioctl.h>
80 
81 #ifdef DEBUG
82 #define IWN_DEBUG
83 #endif
84 
85 /*
86  * register access attributes
87  */
88 static ddi_device_acc_attr_t iwn_reg_accattr = {
89 	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
90 	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
91 	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC,
92 	.devacc_attr_access	= DDI_DEFAULT_ACC
93 };
94 
95 /*
96  * DMA access attributes for descriptor
97  */
98 static ddi_device_acc_attr_t iwn_dma_descattr = {
99 	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
100 	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
101 	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC,
102 	.devacc_attr_access	= DDI_DEFAULT_ACC
103 };
104 
105 /*
106  * DMA access attributes
107  */
108 static ddi_device_acc_attr_t iwn_dma_accattr = {
109 	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
110 	.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC,
111 	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC,
112 	.devacc_attr_access	= DDI_DEFAULT_ACC
113 };
114 
115 
116 /*
117  * Supported rates for 802.11a/b/g modes (in 500Kbps units).
118  */
119 static const struct ieee80211_rateset iwn_rateset_11a =
120 	{ 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
121 
122 static const struct ieee80211_rateset iwn_rateset_11b =
123 	{ 4, { 2, 4, 11, 22 } };
124 
125 static const struct ieee80211_rateset iwn_rateset_11g =
126 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
127 
128 static void	iwn_kstat_create(struct iwn_softc *, const char *, size_t,
129     kstat_t **, void **);
130 static void	iwn_kstat_free(kstat_t *, void *, size_t);
131 static void	iwn_kstat_init(struct iwn_softc *);
132 static void	iwn_kstat_init_2000(struct iwn_softc *);
133 static void	iwn_kstat_init_4965(struct iwn_softc *);
134 static void	iwn_kstat_init_6000(struct iwn_softc *);
135 static void	iwn_intr_teardown(struct iwn_softc *);
136 static int	iwn_intr_add(struct iwn_softc *, int);
137 static int	iwn_intr_setup(struct iwn_softc *);
138 static int	iwn_attach(dev_info_t *, ddi_attach_cmd_t);
139 static int	iwn4965_attach(struct iwn_softc *);
140 static int	iwn5000_attach(struct iwn_softc *, uint16_t);
141 static int	iwn_detach(dev_info_t *, ddi_detach_cmd_t);
142 static int	iwn_quiesce(dev_info_t *);
143 static int	iwn_nic_lock(struct iwn_softc *);
144 static int	iwn_eeprom_lock(struct iwn_softc *);
145 static int	iwn_init_otprom(struct iwn_softc *);
146 static int	iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
147 static int	iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
148     uint_t, uint_t, void **, ddi_device_acc_attr_t *, uint_t);
149 static void	iwn_dma_contig_free(struct iwn_dma_info *);
150 static int	iwn_alloc_sched(struct iwn_softc *);
151 static void	iwn_free_sched(struct iwn_softc *);
152 static int	iwn_alloc_kw(struct iwn_softc *);
153 static void	iwn_free_kw(struct iwn_softc *);
154 static int	iwn_alloc_ict(struct iwn_softc *);
155 static void	iwn_free_ict(struct iwn_softc *);
156 static int	iwn_alloc_fwmem(struct iwn_softc *);
157 static void	iwn_free_fwmem(struct iwn_softc *);
158 static int	iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
159 static void	iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
160 static void	iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
161 static int	iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
162 		    int);
163 static void	iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
164 static void	iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
165 static void	iwn5000_ict_reset(struct iwn_softc *);
166 static int	iwn_read_eeprom(struct iwn_softc *);
167 static void	iwn4965_read_eeprom(struct iwn_softc *);
168 
169 #ifdef IWN_DEBUG
170 static void	iwn4965_print_power_group(struct iwn_softc *, int);
171 #endif
172 static void	iwn5000_read_eeprom(struct iwn_softc *);
173 static void	iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
174 static void	iwn_read_eeprom_enhinfo(struct iwn_softc *);
175 static struct	ieee80211_node *iwn_node_alloc(ieee80211com_t *);
176 static void	iwn_node_free(ieee80211_node_t *);
177 static void	iwn_newassoc(struct ieee80211_node *, int);
178 static int	iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
179 static void	iwn_iter_func(void *, struct ieee80211_node *);
180 static void	iwn_calib_timeout(void *);
181 static void	iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
182 		    struct iwn_rx_data *);
183 static void	iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
184 		    struct iwn_rx_data *);
185 #ifndef IEEE80211_NO_HT
186 static void	iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
187 		    struct iwn_rx_data *);
188 #endif
189 static void	iwn5000_rx_calib_results(struct iwn_softc *,
190 		    struct iwn_rx_desc *, struct iwn_rx_data *);
191 static void	iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
192 		    struct iwn_rx_data *);
193 static void	iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
194 		    struct iwn_rx_data *);
195 static void	iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
196 		    struct iwn_rx_data *);
197 static void	iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
198 		    uint8_t);
199 static void	iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
200 static void	iwn_notif_intr(struct iwn_softc *);
201 static void	iwn_wakeup_intr(struct iwn_softc *);
202 static void	iwn_fatal_intr(struct iwn_softc *);
203 static uint_t	iwn_intr(caddr_t, caddr_t);
204 static void	iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
205 		    uint16_t);
206 static void	iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
207 		    uint16_t);
208 #ifdef notyet
209 static void	iwn5000_reset_sched(struct iwn_softc *, int, int);
210 #endif
211 static int	iwn_send(ieee80211com_t *, mblk_t *, uint8_t);
212 static void	iwn_watchdog(void *);
213 static int	iwn_cmd(struct iwn_softc *, uint8_t, void *, int, int);
214 static int	iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
215 		    int);
216 static int	iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
217 		    int);
218 static int	iwn_set_link_quality(struct iwn_softc *,
219 		    struct ieee80211_node *);
220 static int	iwn_add_broadcast_node(struct iwn_softc *, int);
221 static void	iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
222 static int	iwn_set_critical_temp(struct iwn_softc *);
223 static int	iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
224 static void	iwn4965_power_calibration(struct iwn_softc *, int);
225 static int	iwn4965_set_txpower(struct iwn_softc *, int);
226 static int	iwn5000_set_txpower(struct iwn_softc *, int);
227 static int	iwn4965_get_rssi(const struct iwn_rx_stat *);
228 static int	iwn5000_get_rssi(const struct iwn_rx_stat *);
229 static int	iwn_get_noise(const struct iwn_rx_general_stats *);
230 static int	iwn4965_get_temperature(struct iwn_softc *);
231 static int	iwn5000_get_temperature(struct iwn_softc *);
232 static int	iwn_init_sensitivity(struct iwn_softc *);
233 static void	iwn_collect_noise(struct iwn_softc *,
234 		    const struct iwn_rx_general_stats *);
235 static int	iwn4965_init_gains(struct iwn_softc *);
236 static int	iwn5000_init_gains(struct iwn_softc *);
237 static int	iwn4965_set_gains(struct iwn_softc *);
238 static int	iwn5000_set_gains(struct iwn_softc *);
239 static void	iwn_tune_sensitivity(struct iwn_softc *,
240 		    const struct iwn_rx_stats *);
241 static int	iwn_send_sensitivity(struct iwn_softc *);
242 static int	iwn_set_pslevel(struct iwn_softc *, int, int, int);
243 static int	iwn5000_runtime_calib(struct iwn_softc *);
244 
245 static int	iwn_config_bt_coex_bluetooth(struct iwn_softc *);
246 static int	iwn_config_bt_coex_prio_table(struct iwn_softc *);
247 static int	iwn_config_bt_coex_adv1(struct iwn_softc *);
248 static int	iwn_config_bt_coex_adv2(struct iwn_softc *);
249 
250 static int	iwn_config(struct iwn_softc *);
251 static uint16_t	iwn_get_active_dwell_time(struct iwn_softc *, uint16_t,
252 		    uint8_t);
253 static uint16_t	iwn_limit_dwell(struct iwn_softc *, uint16_t);
254 static uint16_t	iwn_get_passive_dwell_time(struct iwn_softc *, uint16_t);
255 static int	iwn_scan(struct iwn_softc *, uint16_t);
256 static int	iwn_auth(struct iwn_softc *);
257 static int	iwn_run(struct iwn_softc *);
258 #ifdef IWN_HWCRYPTO
259 static int	iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
260 		    struct ieee80211_key *);
261 static void	iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
262 		    struct ieee80211_key *);
263 #endif
264 static int	iwn_wme_update(struct ieee80211com *);
265 #ifndef IEEE80211_NO_HT
266 static int	iwn_ampdu_rx_start(struct ieee80211com *,
267 		    struct ieee80211_node *, uint8_t);
268 static void	iwn_ampdu_rx_stop(struct ieee80211com *,
269 		    struct ieee80211_node *, uint8_t);
270 static int	iwn_ampdu_tx_start(struct ieee80211com *,
271 		    struct ieee80211_node *, uint8_t);
272 static void	iwn_ampdu_tx_stop(struct ieee80211com *,
273 		    struct ieee80211_node *, uint8_t);
274 static void	iwn4965_ampdu_tx_start(struct iwn_softc *,
275 		    struct ieee80211_node *, uint8_t, uint16_t);
276 static void	iwn4965_ampdu_tx_stop(struct iwn_softc *,
277 		    uint8_t, uint16_t);
278 static void	iwn5000_ampdu_tx_start(struct iwn_softc *,
279 		    struct ieee80211_node *, uint8_t, uint16_t);
280 static void	iwn5000_ampdu_tx_stop(struct iwn_softc *,
281 		    uint8_t, uint16_t);
282 #endif
283 static int	iwn5000_query_calibration(struct iwn_softc *);
284 static int	iwn5000_send_calibration(struct iwn_softc *);
285 static int	iwn5000_send_wimax_coex(struct iwn_softc *);
286 static int	iwn6000_temp_offset_calib(struct iwn_softc *);
287 static int	iwn2000_temp_offset_calib(struct iwn_softc *);
288 static int	iwn4965_post_alive(struct iwn_softc *);
289 static int	iwn5000_post_alive(struct iwn_softc *);
290 static int	iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
291 		    int);
292 static int	iwn4965_load_firmware(struct iwn_softc *);
293 static int	iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
294 		    const uint8_t *, int);
295 static int	iwn5000_load_firmware(struct iwn_softc *);
296 static int	iwn_read_firmware_leg(struct iwn_softc *,
297 		    struct iwn_fw_info *);
298 static int	iwn_read_firmware_tlv(struct iwn_softc *,
299 		    struct iwn_fw_info *, uint16_t);
300 static int	iwn_read_firmware(struct iwn_softc *);
301 static int	iwn_clock_wait(struct iwn_softc *);
302 static int	iwn_apm_init(struct iwn_softc *);
303 static void	iwn_apm_stop_master(struct iwn_softc *);
304 static void	iwn_apm_stop(struct iwn_softc *);
305 static int	iwn4965_nic_config(struct iwn_softc *);
306 static int	iwn5000_nic_config(struct iwn_softc *);
307 static int	iwn_hw_prepare(struct iwn_softc *);
308 static int	iwn_hw_init(struct iwn_softc *);
309 static void	iwn_hw_stop(struct iwn_softc *, boolean_t);
310 static int	iwn_init(struct iwn_softc *);
311 static void	iwn_abort_scan(void *);
312 static void	iwn_periodic(void *);
313 static int	iwn_fast_recover(struct iwn_softc *);
314 
315 static uint8_t	*ieee80211_add_ssid(uint8_t *, const uint8_t *, uint32_t);
316 static uint8_t	*ieee80211_add_rates(uint8_t *,
317     const struct ieee80211_rateset *);
318 static uint8_t	*ieee80211_add_xrates(uint8_t *,
319     const struct ieee80211_rateset *);
320 
321 static void	iwn_fix_channel(struct iwn_softc *, mblk_t *,
322 		    struct iwn_rx_stat *);
323 
324 #ifdef IWN_DEBUG
325 
326 #define	IWN_DBG(...)	iwn_dbg("?" __VA_ARGS__)
327 
328 static int iwn_dbg_print = 0;
329 
330 static void
331 iwn_dbg(const char *fmt, ...)
332 {
333 	va_list	ap;
334 
335 	if (iwn_dbg_print != 0) {
336 		va_start(ap, fmt);
337 		vcmn_err(CE_CONT, fmt, ap);
338 		va_end(ap);
339 	}
340 }
341 
342 #else
343 #define	IWN_DBG(...)
344 #endif
345 
346 /*
347  * tunables
348  */
349 
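/*
 * These tunables are plain module globals for now (see the TODO above
 * about converting them to driver properties).  As an illustration only,
 * a value can be overridden at boot with an /etc/system line such as
 *	set iwn:iwn_scan_timeout = 30
 * (example value, not a recommendation).
 */
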
350 /*
351  * enable 5GHz scanning
352  */
353 int iwn_enable_5ghz = 1;
354 
355 /*
356  * If more than 50 consecutive beacons are missed,
357  * we've probably lost our connection.
358  * If more than 5 consecutive beacons are missed,
359  * reinitialize the sensitivity state machine.
360  */
361 int iwn_beacons_missed_disconnect = 50;
362 int iwn_beacons_missed_sensitivity = 5;
363 
364 /*
365  * iwn_periodic interval, in units of msec
366  */
367 int iwn_periodic_interval = 100;
368 
369 /*
370  * scan timeout in sec
371  */
372 int iwn_scan_timeout = 20;
373 
374 static ether_addr_t etherbroadcastaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
375 
376 static void *iwn_state = NULL;
377 
378 /*
379  * MAC callback entries
380  */
381 static int	iwn_m_stat(void *, uint_t, uint64_t *);
382 static int	iwn_m_start(void *);
383 static void	iwn_m_stop(void *);
384 static int	iwn_m_unicst(void *, const uint8_t *);
385 static int	iwn_m_multicst(void *, boolean_t, const uint8_t *);
386 static int	iwn_m_promisc(void *, boolean_t);
387 static mblk_t	*iwn_m_tx(void *, mblk_t *);
388 static void	iwn_m_ioctl(void *, queue_t *, mblk_t *);
389 static int	iwn_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
390     const void *);
391 static int	iwn_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
392     void *);
393 static void	iwn_m_propinfo(void *, const char *, mac_prop_id_t,
394     mac_prop_info_handle_t);
395 
396 mac_callbacks_t	iwn_m_callbacks = {
397 	.mc_callbacks	= MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
398 	.mc_getstat	= iwn_m_stat,
399 	.mc_start	= iwn_m_start,
400 	.mc_stop	= iwn_m_stop,
401 	.mc_setpromisc	= iwn_m_promisc,
402 	.mc_multicst	= iwn_m_multicst,
403 	.mc_unicst	= iwn_m_unicst,
404 	.mc_tx		= iwn_m_tx,
405 	.mc_reserved	= NULL,
406 	.mc_ioctl	= iwn_m_ioctl,
407 	.mc_getcapab	= NULL,
408 	.mc_open	= NULL,
409 	.mc_close	= NULL,
410 	.mc_setprop	= iwn_m_setprop,
411 	.mc_getprop	= iwn_m_getprop,
412 	.mc_propinfo	= iwn_m_propinfo
413 };
414 
415 static inline uint32_t
416 iwn_read(struct iwn_softc *sc, int reg)
417 {
418 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
419 	return (ddi_get32(sc->sc_regh, (uint32_t *)(sc->sc_base + reg)));
420 }
421 
422 static inline void
423 iwn_write(struct iwn_softc *sc, int reg, uint32_t val)
424 {
425 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
426 	ddi_put32(sc->sc_regh, (uint32_t *)(sc->sc_base + reg), val);
427 }
428 
429 static inline void
430 iwn_write_1(struct iwn_softc *sc, int reg, uint8_t val)
431 {
432 	ddi_put8(sc->sc_regh, (uint8_t *)(sc->sc_base + reg), val);
433 }
434 
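/*
 * kstat helpers: iwn_kstat_create() creates a named kstat and, should
 * kstat_create() fail, falls back to a plain kmem_zalloc() buffer so that
 * the returned data pointer is always valid.  iwn_kstat_free() releases
 * whichever of the two was set up.
 */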
435 static void
436 iwn_kstat_create(struct iwn_softc *sc, const char *name, size_t size,
437     kstat_t **ks, void **data)
438 {
439 	*ks = kstat_create(ddi_driver_name(sc->sc_dip),
440 	    ddi_get_instance(sc->sc_dip), name, "misc", KSTAT_TYPE_NAMED,
441 	    size / sizeof (kstat_named_t), 0);
442 	if (*ks == NULL)
443 		*data = kmem_zalloc(size, KM_SLEEP);
444 	else
445 		*data = (*ks)->ks_data;
446 }
447 
448 static void
449 iwn_kstat_free(kstat_t *ks, void *data, size_t size)
450 {
451 	if (ks != NULL)
452 		kstat_delete(ks);
453 	else if (data != NULL)
454 		kmem_free(data, size);
455 }
456 
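/*
 * Point the common kstats (misc, antenna, sensitivity, timing, EDCA) at
 * the softc lock and initialize their named entries.
 */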
457 static void
458 iwn_kstat_init(struct iwn_softc *sc)
459 {
460 	if (sc->sc_ks_misc != NULL)
461 		sc->sc_ks_misc->ks_lock = &sc->sc_mtx;
462 	if (sc->sc_ks_ant != NULL)
463 		sc->sc_ks_ant->ks_lock = &sc->sc_mtx;
464 	if (sc->sc_ks_sens != NULL)
465 		sc->sc_ks_sens->ks_lock = &sc->sc_mtx;
466 	if (sc->sc_ks_timing != NULL)
467 		sc->sc_ks_timing->ks_lock = &sc->sc_mtx;
468 	if (sc->sc_ks_edca != NULL)
469 		sc->sc_ks_edca->ks_lock = &sc->sc_mtx;
470 
471 	kstat_named_init(&sc->sc_misc->temp,
472 	    "temperature", KSTAT_DATA_ULONG);
473 	kstat_named_init(&sc->sc_misc->crit_temp,
474 	    "critical temperature", KSTAT_DATA_ULONG);
475 	kstat_named_init(&sc->sc_misc->pslevel,
476 	    "power saving level", KSTAT_DATA_ULONG);
477 	kstat_named_init(&sc->sc_misc->noise,
478 	    "noise", KSTAT_DATA_LONG);
479 
480 
481 	kstat_named_init(&sc->sc_ant->tx_ant,
482 	    "TX mask", KSTAT_DATA_ULONG);
483 	kstat_named_init(&sc->sc_ant->rx_ant,
484 	    "RX mask", KSTAT_DATA_ULONG);
485 	kstat_named_init(&sc->sc_ant->conn_ant,
486 	    "connected mask", KSTAT_DATA_ULONG);
487 	kstat_named_init(&sc->sc_ant->gain[0],
488 	    "gain A", KSTAT_DATA_ULONG);
489 	kstat_named_init(&sc->sc_ant->gain[1],
490 	    "gain B", KSTAT_DATA_ULONG);
491 	kstat_named_init(&sc->sc_ant->gain[2],
492 	    "gain C", KSTAT_DATA_ULONG);
493 
494 	kstat_named_init(&sc->sc_sens->ofdm_x1,
495 	    "OFDM X1", KSTAT_DATA_ULONG);
496 	kstat_named_init(&sc->sc_sens->ofdm_mrc_x1,
497 	    "OFDM MRC X1", KSTAT_DATA_ULONG);
498 	kstat_named_init(&sc->sc_sens->ofdm_x4,
499 	    "OFDM X4", KSTAT_DATA_ULONG);
500 	kstat_named_init(&sc->sc_sens->ofdm_mrc_x4,
501 	    "OFDM MRC X4", KSTAT_DATA_ULONG);
502 	kstat_named_init(&sc->sc_sens->cck_x4,
503 	    "CCK X4", KSTAT_DATA_ULONG);
504 	kstat_named_init(&sc->sc_sens->cck_mrc_x4,
505 	    "CCK MRC X4", KSTAT_DATA_ULONG);
506 	kstat_named_init(&sc->sc_sens->energy_cck,
507 	    "energy CCK", KSTAT_DATA_ULONG);
508 
509 	kstat_named_init(&sc->sc_timing->bintval,
510 	    "bintval", KSTAT_DATA_ULONG);
511 	kstat_named_init(&sc->sc_timing->tstamp,
512 	    "timestamp", KSTAT_DATA_ULONGLONG);
513 	kstat_named_init(&sc->sc_timing->init,
514 	    "init", KSTAT_DATA_ULONG);
515 
516 	kstat_named_init(&sc->sc_edca->ac[0].cwmin,
517 	    "background cwmin", KSTAT_DATA_ULONG);
518 	kstat_named_init(&sc->sc_edca->ac[0].cwmax,
519 	    "background cwmax", KSTAT_DATA_ULONG);
520 	kstat_named_init(&sc->sc_edca->ac[0].aifsn,
521 	    "background aifsn", KSTAT_DATA_ULONG);
522 	kstat_named_init(&sc->sc_edca->ac[0].txop,
523 	    "background txop", KSTAT_DATA_ULONG);
524 	kstat_named_init(&sc->sc_edca->ac[1].cwmin,
525 	    "best effort cwmin", KSTAT_DATA_ULONG);
526 	kstat_named_init(&sc->sc_edca->ac[1].cwmax,
527 	    "best effort cwmax", KSTAT_DATA_ULONG);
528 	kstat_named_init(&sc->sc_edca->ac[1].aifsn,
529 	    "best effort aifsn", KSTAT_DATA_ULONG);
530 	kstat_named_init(&sc->sc_edca->ac[1].txop,
531 	    "best effort txop", KSTAT_DATA_ULONG);
532 	kstat_named_init(&sc->sc_edca->ac[2].cwmin,
533 	    "video cwmin", KSTAT_DATA_ULONG);
534 	kstat_named_init(&sc->sc_edca->ac[2].cwmax,
535 	    "video cwmax", KSTAT_DATA_ULONG);
536 	kstat_named_init(&sc->sc_edca->ac[2].aifsn,
537 	    "video aifsn", KSTAT_DATA_ULONG);
538 	kstat_named_init(&sc->sc_edca->ac[2].txop,
539 	    "video txop", KSTAT_DATA_ULONG);
540 	kstat_named_init(&sc->sc_edca->ac[3].cwmin,
541 	    "voice cwmin", KSTAT_DATA_ULONG);
542 	kstat_named_init(&sc->sc_edca->ac[3].cwmax,
543 	    "voice cwmax", KSTAT_DATA_ULONG);
544 	kstat_named_init(&sc->sc_edca->ac[3].aifsn,
545 	    "voice aifsn", KSTAT_DATA_ULONG);
546 	kstat_named_init(&sc->sc_edca->ac[3].txop,
547 	    "voice txop", KSTAT_DATA_ULONG);
548 }
549 
550 static void
551 iwn_kstat_init_2000(struct iwn_softc *sc)
552 {
553 	if (sc->sc_ks_toff != NULL)
554 		sc->sc_ks_toff->ks_lock = &sc->sc_mtx;
555 
556 	kstat_named_init(&sc->sc_toff.t2000->toff_lo,
557 	    "temperature offset low", KSTAT_DATA_LONG);
558 	kstat_named_init(&sc->sc_toff.t2000->toff_hi,
559 	    "temperature offset high", KSTAT_DATA_LONG);
560 	kstat_named_init(&sc->sc_toff.t2000->volt,
561 	    "reference voltage", KSTAT_DATA_LONG);
562 }
563 
564 static void
565 iwn_kstat_init_4965(struct iwn_softc *sc)
566 {
567 	int i, r;
568 
569 	if (sc->sc_ks_txpower != NULL)
570 		sc->sc_ks_txpower->ks_lock = &sc->sc_mtx;
571 
572 	kstat_named_init(&sc->sc_txpower->vdiff,
573 	    "voltage comp", KSTAT_DATA_LONG);
574 	kstat_named_init(&sc->sc_txpower->chan,
575 	    "channel", KSTAT_DATA_LONG);
576 	kstat_named_init(&sc->sc_txpower->group,
577 	    "attenuation group", KSTAT_DATA_LONG);
578 	kstat_named_init(&sc->sc_txpower->subband,
579 	    "sub-band", KSTAT_DATA_LONG);
580 	for (i = 0; i != 2; i++) {
581 		char tmp[KSTAT_STRLEN];
582 
583 		(void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d power", i);
584 		kstat_named_init(&sc->sc_txpower->txchain[i].power,
585 		    tmp, KSTAT_DATA_LONG);
586 
587 		(void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d gain", i);
588 		kstat_named_init(&sc->sc_txpower->txchain[i].gain,
589 		    tmp, KSTAT_DATA_LONG);
590 
591 		(void) snprintf(tmp, KSTAT_STRLEN - 1, "Ant %d temperature", i);
592 		kstat_named_init(&sc->sc_txpower->txchain[i].temp,
593 		    tmp, KSTAT_DATA_LONG);
594 
595 		(void) snprintf(tmp, KSTAT_STRLEN - 1,
596 		    "Ant %d temperature compensation", i);
597 		kstat_named_init(&sc->sc_txpower->txchain[i].tcomp,
598 		    tmp, KSTAT_DATA_LONG);
599 
600 		for (r = 0; r != IWN_RIDX_MAX; r++) {
601 			(void) snprintf(tmp, KSTAT_STRLEN - 1,
602 			    "Ant %d Rate %d RF gain", i, r);
603 			kstat_named_init(
604 			    &sc->sc_txpower->txchain[i].rate[r].rf_gain,
605 			    tmp, KSTAT_DATA_LONG);
606 
607 			(void) snprintf(tmp, KSTAT_STRLEN - 1,
608 			    "Ant %d Rate %d DSP gain", i, r);
609 			kstat_named_init(
610 			    &sc->sc_txpower->txchain[i].rate[r].dsp_gain,
611 			    tmp, KSTAT_DATA_LONG);
612 		}
613 	}
614 }
615 
616 static void
617 iwn_kstat_init_6000(struct iwn_softc *sc)
618 {
619 	if (sc->sc_ks_toff != NULL)
620 		sc->sc_ks_toff->ks_lock = &sc->sc_mtx;
621 
622 	kstat_named_init(&sc->sc_toff.t6000->toff,
623 	    "temperature offset", KSTAT_DATA_LONG);
624 }
625 
626 static void
627 iwn_intr_teardown(struct iwn_softc *sc)
628 {
629 	if (sc->sc_intr_htable != NULL) {
630 		if ((sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
631 			(void) ddi_intr_block_disable(sc->sc_intr_htable,
632 			    sc->sc_intr_count);
633 		} else {
634 			(void) ddi_intr_disable(sc->sc_intr_htable[0]);
635 		}
636 		(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
637 		(void) ddi_intr_free(sc->sc_intr_htable[0]);
638 		sc->sc_intr_htable[0] = NULL;
639 
640 		kmem_free(sc->sc_intr_htable, sc->sc_intr_size);
641 		sc->sc_intr_size = 0;
642 		sc->sc_intr_htable = NULL;
643 	}
644 }
645 
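/*
 * Allocate and enable a single interrupt of the given type, register
 * iwn_intr() as its handler and record the interrupt priority and
 * capabilities.  On failure the caller cleans up via iwn_intr_teardown().
 */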
646 static int
647 iwn_intr_add(struct iwn_softc *sc, int intr_type)
648 {
649 	int ni, na;
650 	int ret;
651 	char *func;
652 
653 	if (ddi_intr_get_nintrs(sc->sc_dip, intr_type, &ni) != DDI_SUCCESS)
654 		return (DDI_FAILURE);
655 
656 
657 	if (ddi_intr_get_navail(sc->sc_dip, intr_type, &na) != DDI_SUCCESS)
658 		return (DDI_FAILURE);
659 
660 	sc->sc_intr_size = sizeof (ddi_intr_handle_t);
661 	sc->sc_intr_htable = kmem_zalloc(sc->sc_intr_size, KM_SLEEP);
662 
663 	ret = ddi_intr_alloc(sc->sc_dip, sc->sc_intr_htable, intr_type, 0, 1,
664 	    &sc->sc_intr_count, DDI_INTR_ALLOC_STRICT);
665 	if (ret != DDI_SUCCESS) {
666 		dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_alloc() failed");
667 		return (DDI_FAILURE);
668 	}
669 
670 	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
671 	if (ret != DDI_SUCCESS) {
672 		dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_get_pri() failed");
673 		return (DDI_FAILURE);
674 	}
675 
676 	ret = ddi_intr_add_handler(sc->sc_intr_htable[0], iwn_intr, (caddr_t)sc,
677 	    NULL);
678 	if (ret != DDI_SUCCESS) {
679 		dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_add_handler() failed");
680 		return (DDI_FAILURE);
681 	}
682 
683 	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
684 	if (ret != DDI_SUCCESS) {
685 		dev_err(sc->sc_dip, CE_WARN, "!ddi_intr_get_cap() failed");
686 		return (DDI_FAILURE);
687 	}
688 
689 	if ((sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) != 0) {
690 		ret = ddi_intr_block_enable(sc->sc_intr_htable,
691 		    sc->sc_intr_count);
692 		func = "ddi_intr_block_enable";
693 	} else {
694 		ret = ddi_intr_enable(sc->sc_intr_htable[0]);
695 		func = "ddi_intr_enable";
696 	}
697 
698 	if (ret != DDI_SUCCESS) {
699 		dev_err(sc->sc_dip, CE_WARN, "!%s() failed", func);
700 		return (DDI_FAILURE);
701 	}
702 
703 	return (DDI_SUCCESS);
704 }
705 
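/*
 * Set up the device interrupt, preferring MSI-X over MSI over fixed
 * interrupts and falling back to the next type whenever one fails.
 */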
706 static int
707 iwn_intr_setup(struct iwn_softc *sc)
708 {
709 	int intr_type;
710 	int ret;
711 
712 	ret = ddi_intr_get_supported_types(sc->sc_dip, &intr_type);
713 	if (ret != DDI_SUCCESS) {
714 		dev_err(sc->sc_dip, CE_WARN,
715 		    "!ddi_intr_get_supported_types() failed");
716 		return (DDI_FAILURE);
717 	}
718 
719 	if ((intr_type & DDI_INTR_TYPE_MSIX)) {
720 		if (iwn_intr_add(sc, DDI_INTR_TYPE_MSIX) == DDI_SUCCESS)
721 			return (DDI_SUCCESS);
722 		iwn_intr_teardown(sc);
723 	}
724 
725 	if ((intr_type & DDI_INTR_TYPE_MSI)) {
726 		if (iwn_intr_add(sc, DDI_INTR_TYPE_MSI) == DDI_SUCCESS)
727 			return (DDI_SUCCESS);
728 		iwn_intr_teardown(sc);
729 	}
730 
731 	if ((intr_type & DDI_INTR_TYPE_FIXED)) {
732 		if (iwn_intr_add(sc, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS)
733 			return (DDI_SUCCESS);
734 		iwn_intr_teardown(sc);
735 	}
736 
737 	dev_err(sc->sc_dip, CE_WARN, "!iwn_intr_setup() failed");
738 	return (DDI_FAILURE);
739 }
740 
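/*
 * Walk the PCI capability list and return the configuration space offset
 * of the capability with the given ID, if present.
 */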
741 static int
742 iwn_pci_get_capability(ddi_acc_handle_t pcih, int cap, int *cap_off)
743 {
744 	uint8_t ptr;
745 	uint8_t val;
746 
747 	for (ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
748 	    ptr != 0 && ptr != 0xff;
749 	    ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR)) {
750 		val = pci_config_get8(pcih, ptr + PCIE_CAP_ID);
751 		if (val == 0xff)
752 			return (DDI_FAILURE);
753 
754 		if (cap != val)
755 			continue;
756 
757 		*cap_off = ptr;
758 		return (DDI_SUCCESS);
759 	}
760 
761 	return (DDI_FAILURE);
762 }
763 
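/*
 * attach(9E) entry point.  DDI_RESUME simply restarts the hardware if it
 * was running; DDI_ATTACH maps config and register space, sets up
 * interrupts, DMA rings and kstats, then attaches to net80211 and
 * registers with the MAC layer.
 */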
764 static int
765 iwn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
766 {
767 	int instance;
768 
769 	struct iwn_softc *sc;
770 	struct ieee80211com *ic;
771 	char strbuf[32];
772 	wifi_data_t wd = { 0 };
773 	mac_register_t *macp;
774 	uint32_t reg;
775 	int i, error;
776 
777 	switch (cmd) {
778 	case DDI_ATTACH:
779 		break;
780 
781 	case DDI_RESUME:
782 		instance = ddi_get_instance(dip);
783 		sc = ddi_get_soft_state(iwn_state,
784 		    instance);
785 		ASSERT(sc != NULL);
786 
787 		if (sc->sc_flags & IWN_FLAG_RUNNING) {
788 			(void) iwn_init(sc);
789 		}
790 
791 		sc->sc_flags &= ~IWN_FLAG_SUSPEND;
792 
793 		return (DDI_SUCCESS);
794 	default:
795 		return (DDI_FAILURE);
796 	}
797 
798 	instance = ddi_get_instance(dip);
799 
800 	if (ddi_soft_state_zalloc(iwn_state, instance) != DDI_SUCCESS) {
801 		dev_err(dip, CE_WARN, "!ddi_soft_state_zalloc() failed");
802 		return (DDI_FAILURE);
803 	}
804 
805 	sc = ddi_get_soft_state(iwn_state, instance);
806 	ddi_set_driver_private(dip, (caddr_t)sc);
807 
808 	ic = &sc->sc_ic;
809 
810 	sc->sc_dip = dip;
811 
812 	iwn_kstat_create(sc, "hw_state", sizeof (struct iwn_ks_misc),
813 	    &sc->sc_ks_misc, (void **)&sc->sc_misc);
814 	iwn_kstat_create(sc, "antennas", sizeof (struct iwn_ks_ant),
815 	    &sc->sc_ks_ant, (void **)&sc->sc_ant);
816 	iwn_kstat_create(sc, "sensitivity", sizeof (struct iwn_ks_sens),
817 	    &sc->sc_ks_sens, (void **)&sc->sc_sens);
818 	iwn_kstat_create(sc, "timing", sizeof (struct iwn_ks_timing),
819 	    &sc->sc_ks_timing, (void **)&sc->sc_timing);
820 	iwn_kstat_create(sc, "edca", sizeof (struct iwn_ks_edca),
821 	    &sc->sc_ks_edca, (void **)&sc->sc_edca);
822 
823 	if (pci_config_setup(dip, &sc->sc_pcih) != DDI_SUCCESS) {
824 		dev_err(sc->sc_dip, CE_WARN, "!pci_config_setup() failed");
825 		goto fail_pci_config;
826 	}
827 
828 	/*
829 	 * Get the offset of the PCI Express Capability Structure in PCI
830 	 * Configuration Space.
831 	 */
832 	error = iwn_pci_get_capability(sc->sc_pcih, PCI_CAP_ID_PCI_E,
833 	    &sc->sc_cap_off);
834 	if (error != DDI_SUCCESS) {
835 		dev_err(sc->sc_dip, CE_WARN,
836 		    "!PCIe capability structure not found!");
837 		goto fail_pci_capab;
838 	}
839 
840 	/* Clear device-specific "PCI retry timeout" register (41h). */
841 	reg = pci_config_get8(sc->sc_pcih, 0x41);
842 	if (reg)
843 		pci_config_put8(sc->sc_pcih, 0x41, 0);
844 
845 	error = ddi_regs_map_setup(dip, 1, &sc->sc_base, 0, 0, &iwn_reg_accattr,
846 	    &sc->sc_regh);
847 	if (error != DDI_SUCCESS) {
848 		dev_err(sc->sc_dip, CE_WARN, "!ddi_regs_map_setup() failed");
849 		goto fail_regs_map;
850 	}
851 
852 	/* Clear pending interrupts. */
853 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
854 
855 	/* Disable all interrupts. */
856 	IWN_WRITE(sc, IWN_INT_MASK, 0);
857 
858 	/* Install interrupt handler. */
859 	if (iwn_intr_setup(sc) != DDI_SUCCESS)
860 		goto fail_intr;
861 
862 	mutex_init(&sc->sc_mtx, NULL, MUTEX_DRIVER,
863 	    DDI_INTR_PRI(sc->sc_intr_pri));
864 	mutex_init(&sc->sc_tx_mtx, NULL, MUTEX_DRIVER,
865 	    DDI_INTR_PRI(sc->sc_intr_pri));
866 	mutex_init(&sc->sc_mt_mtx, NULL, MUTEX_DRIVER,
867 	    DDI_INTR_PRI(sc->sc_intr_pri));
868 
869 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
870 	cv_init(&sc->sc_scan_cv, NULL, CV_DRIVER, NULL);
871 	cv_init(&sc->sc_fhdma_cv, NULL, CV_DRIVER, NULL);
872 	cv_init(&sc->sc_alive_cv, NULL, CV_DRIVER, NULL);
873 	cv_init(&sc->sc_calib_cv, NULL, CV_DRIVER, NULL);
874 
875 	iwn_kstat_init(sc);
876 
877 	/* Read hardware revision and attach. */
878 	sc->hw_type =
879 	    (IWN_READ(sc, IWN_HW_REV) & IWN_HW_REV_TYPE_MASK)
880 	      >> IWN_HW_REV_TYPE_SHIFT;
881 	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
882 		error = iwn4965_attach(sc);
883 	else
884 		error = iwn5000_attach(sc, sc->sc_devid);
885 	if (error != 0) {
886 		dev_err(sc->sc_dip, CE_WARN, "!could not attach device");
887 		goto fail_hw;
888 	}
889 
890 	if ((error = iwn_hw_prepare(sc)) != 0) {
891 		dev_err(sc->sc_dip, CE_WARN, "!hardware not ready");
892 		goto fail_hw;
893 	}
894 
895 	/* Read MAC address, channels, etc from EEPROM. */
896 	if ((error = iwn_read_eeprom(sc)) != 0) {
897 		dev_err(sc->sc_dip, CE_WARN, "!could not read EEPROM");
898 		goto fail_hw;
899 	}
900 
901 	/* Allocate DMA memory for firmware transfers. */
902 	if ((error = iwn_alloc_fwmem(sc)) != 0) {
903 		dev_err(sc->sc_dip, CE_WARN,
904 		    "!could not allocate memory for firmware");
905 		goto fail_fwmem;
906 	}
907 
908 	/* Allocate "Keep Warm" page. */
909 	if ((error = iwn_alloc_kw(sc)) != 0) {
910 		dev_err(sc->sc_dip, CE_WARN,
911 		    "!could not allocate keep warm page");
912 		goto fail_kw;
913 	}
914 
915 	/* Allocate ICT table for 5000 Series. */
916 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
917 	    (error = iwn_alloc_ict(sc)) != 0) {
918 		dev_err(sc->sc_dip, CE_WARN, "!could not allocate ICT table");
919 		goto fail_ict;
920 	}
921 
922 	/* Allocate TX scheduler "rings". */
923 	if ((error = iwn_alloc_sched(sc)) != 0) {
924 		dev_err(sc->sc_dip, CE_WARN,
925 		    "!could not allocate TX scheduler rings");
926 		goto fail_sched;
927 	}
928 
929 	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
930 	for (i = 0; i < sc->ntxqs; i++) {
931 		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
932 			dev_err(sc->sc_dip, CE_WARN,
933 			    "!could not allocate TX ring %d", i);
934 			while (--i >= 0)
935 				iwn_free_tx_ring(sc, &sc->txq[i]);
936 			goto fail_txring;
937 		}
938 	}
939 
940 	/* Allocate RX ring. */
941 	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
942 		dev_err(sc->sc_dip, CE_WARN, "!could not allocate RX ring");
943 		goto fail_rxring;
944 	}
945 
946 	/* Clear pending interrupts. */
947 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
948 
949 	/* Count the number of available chains. */
950 	sc->ntxchains =
951 	    ((sc->txchainmask >> 2) & 1) +
952 	    ((sc->txchainmask >> 1) & 1) +
953 	    ((sc->txchainmask >> 0) & 1);
954 	sc->nrxchains =
955 	    ((sc->rxchainmask >> 2) & 1) +
956 	    ((sc->rxchainmask >> 1) & 1) +
957 	    ((sc->rxchainmask >> 0) & 1);
958 	dev_err(sc->sc_dip, CE_CONT, "!MIMO %dT%dR, %s, address %s",
959 	    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
960 	    ieee80211_macaddr_sprintf(ic->ic_macaddr));
961 
962 	sc->sc_ant->tx_ant.value.ul = sc->txchainmask;
963 	sc->sc_ant->rx_ant.value.ul = sc->rxchainmask;
964 
965 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
966 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
967 	ic->ic_state = IEEE80211_S_INIT;
968 
969 	/* Set device capabilities. */
970 	/* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN,
971 	 * and IEEE80211_C_PMGT too. */
972 	ic->ic_caps =
973 	    IEEE80211_C_IBSS |		/* IBSS mode support */
974 	    IEEE80211_C_WPA |		/* 802.11i */
975 	    IEEE80211_C_MONITOR |	/* monitor mode supported */
976 	    IEEE80211_C_TXPMGT |	/* tx power management */
977 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
978 	    IEEE80211_C_SHPREAMBLE |	/* short preamble supported */
979 	    IEEE80211_C_WME;		/* 802.11e */
980 
981 #ifndef IEEE80211_NO_HT
982 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
983 		/* Set HT capabilities. */
984 		ic->ic_htcaps =
985 #if IWN_RBUF_SIZE == 8192
986 		    IEEE80211_HTCAP_AMSDU7935 |
987 #endif
988 		    IEEE80211_HTCAP_CBW20_40 |
989 		    IEEE80211_HTCAP_SGI20 |
990 		    IEEE80211_HTCAP_SGI40;
991 		if (sc->hw_type != IWN_HW_REV_TYPE_4965)
992 			ic->ic_htcaps |= IEEE80211_HTCAP_GF;
993 		if (sc->hw_type == IWN_HW_REV_TYPE_6050)
994 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
995 		else
996 			ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
997 	}
998 #endif	/* !IEEE80211_NO_HT */
999 
1000 	/* Set supported legacy rates. */
1001 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwn_rateset_11b;
1002 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwn_rateset_11g;
1003 	if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
1004 		ic->ic_sup_rates[IEEE80211_MODE_11A] = iwn_rateset_11a;
1005 	}
1006 #ifndef IEEE80211_NO_HT
1007 	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1008 		/* Set supported HT rates. */
1009 		ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
1010 		if (sc->nrxchains > 1)
1011 			ic->ic_sup_mcs[1] = 0xff;	/* MCS 7-15 */
1012 		if (sc->nrxchains > 2)
1013 			ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
1014 	}
1015 #endif
1016 
1017 	/* IBSS channel undefined for now. */
1018 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
1019 
1020 	ic->ic_node_newassoc = iwn_newassoc;
1021 	ic->ic_xmit = iwn_send;
1022 #ifdef IWN_HWCRYPTO
1023 	ic->ic_crypto.cs_key_set = iwn_set_key;
1024 	ic->ic_crypto.cs_key_delete = iwn_delete_key;
1025 #endif
1026 	ic->ic_wme.wme_update = iwn_wme_update;
1027 #ifndef IEEE80211_NO_HT
1028 	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
1029 	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
1030 	ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
1031 	ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
1032 #endif
1033 	/*
1034 	 * attach to 802.11 module
1035 	 */
1036 	ieee80211_attach(ic);
1037 
1038 	ieee80211_register_door(ic, ddi_driver_name(dip), ddi_get_instance(dip));
1039 
1040 	/* Override 802.11 state transition machine. */
1041 	sc->sc_newstate = ic->ic_newstate;
1042 	ic->ic_newstate = iwn_newstate;
1043 	ic->ic_watchdog = iwn_watchdog;
1044 
1045 	ic->ic_node_alloc = iwn_node_alloc;
1046 	ic->ic_node_free = iwn_node_free;
1047 
1048 	ieee80211_media_init(ic);
1049 
1050 	/*
1051 	 * initialize default tx key
1052 	 */
1053 	ic->ic_def_txkey = 0;
1054 
1055 	sc->amrr.amrr_min_success_threshold =  1;
1056 	sc->amrr.amrr_max_success_threshold = 15;
1057 
1058 	/*
1059 	 * Initialize the WiFi plugin data handed to the MAC layer
1060 	 */
1061 	wd.wd_secalloc = WIFI_SEC_NONE;
1062 	wd.wd_opmode = ic->ic_opmode;
1063 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
1064 
1065 	/*
1066 	 * Allocate and set up the GLDv3 MAC registration structure
1067 	 */
1068 	macp = mac_alloc(MAC_VERSION);
1069 	if (macp == NULL) {
1070 		dev_err(sc->sc_dip, CE_WARN, "!mac_alloc() failed");
1071 		goto fail_mac_alloc;
1072 	}
1073 
1074 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
1075 	macp->m_driver		= sc;
1076 	macp->m_dip		= dip;
1077 	macp->m_src_addr	= ic->ic_macaddr;
1078 	macp->m_callbacks	= &iwn_m_callbacks;
1079 	macp->m_min_sdu		= 0;
1080 	macp->m_max_sdu		= IEEE80211_MTU;
1081 	macp->m_pdata		= &wd;
1082 	macp->m_pdata_size	= sizeof (wd);
1083 
1084 	/*
1085 	 * Register with the MAC layer
1086 	 */
1087 	error = mac_register(macp, &ic->ic_mach);
1088 	mac_free(macp);
1089 	if (error != DDI_SUCCESS) {
1090 		dev_err(sc->sc_dip, CE_WARN, "!mac_register() failed");
1091 		goto fail_mac_alloc;
1092 	}
1093 
1094 	/*
1095 	 * Create minor node of type DDI_NT_NET_WIFI
1096 	 */
1097 	(void) snprintf(strbuf, sizeof (strbuf), "iwn%d", instance);
1098 	error = ddi_create_minor_node(dip, strbuf, S_IFCHR,
1099 	    instance + 1, DDI_NT_NET_WIFI, 0);
1100 	if (error != DDI_SUCCESS) {
1101 		dev_err(sc->sc_dip, CE_WARN, "!ddi_create_minor_node() failed");
1102 		goto fail_minor;
1103 	}
1104 
1105 	/*
1106 	 * Notify link is down now
1107 	 */
1108 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
1109 
1110 	sc->sc_periodic = ddi_periodic_add(iwn_periodic, sc,
1111 	    iwn_periodic_interval * MICROSEC, 0);
1112 
1113 	if (sc->sc_ks_misc)
1114 		kstat_install(sc->sc_ks_misc);
1115 	if (sc->sc_ks_ant)
1116 		kstat_install(sc->sc_ks_ant);
1117 	if (sc->sc_ks_sens)
1118 		kstat_install(sc->sc_ks_sens);
1119 	if (sc->sc_ks_timing)
1120 		kstat_install(sc->sc_ks_timing);
1121 	if (sc->sc_ks_edca)
1122 		kstat_install(sc->sc_ks_edca);
1123 	if (sc->sc_ks_txpower)
1124 		kstat_install(sc->sc_ks_txpower);
1125 	if (sc->sc_ks_toff)
1126 		kstat_install(sc->sc_ks_toff);
1127 
1128 	sc->sc_flags |= IWN_FLAG_ATTACHED;
1129 
1130 	return (DDI_SUCCESS);
1131 
1132 	/* Free allocated memory if something failed during attachment. */
1133 fail_minor:
1134 	mac_unregister(ic->ic_mach);
1135 
1136 fail_mac_alloc:
1137 	ieee80211_detach(ic);
1138 	iwn_free_rx_ring(sc, &sc->rxq);
1139 
1140 fail_rxring:
1141 	for (i = 0; i < sc->ntxqs; i++)
1142 		iwn_free_tx_ring(sc, &sc->txq[i]);
1143 
1144 fail_txring:
1145 	iwn_free_sched(sc);
1146 
1147 fail_sched:
1148 	if (sc->ict != NULL)
1149 		iwn_free_ict(sc);
1150 
1151 fail_ict:
1152 	iwn_free_kw(sc);
1153 
1154 fail_kw:
1155 	iwn_free_fwmem(sc);
1156 
1157 fail_fwmem:
1158 fail_hw:
1159 	iwn_intr_teardown(sc);
1160 
1161 	iwn_kstat_free(sc->sc_ks_txpower, sc->sc_txpower,
1162 	    sizeof (struct iwn_ks_txpower));
1163 
1164 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
1165 		iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t6000,
1166 		    sizeof (struct iwn_ks_toff_6000));
1167 	else
1168 		iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t2000,
1169 		    sizeof (struct iwn_ks_toff_2000));
1170 
1171 fail_intr:
1172 	ddi_regs_map_free(&sc->sc_regh);
1173 
1174 fail_regs_map:
1175 fail_pci_capab:
1176 	pci_config_teardown(&sc->sc_pcih);
1177 
1178 fail_pci_config:
1179 	iwn_kstat_free(sc->sc_ks_misc, sc->sc_misc,
1180 	    sizeof (struct iwn_ks_misc));
1181 	iwn_kstat_free(sc->sc_ks_ant, sc->sc_ant,
1182 	    sizeof (struct iwn_ks_ant));
1183 	iwn_kstat_free(sc->sc_ks_sens, sc->sc_sens,
1184 	    sizeof (struct iwn_ks_sens));
1185 	iwn_kstat_free(sc->sc_ks_timing, sc->sc_timing,
1186 	    sizeof (struct iwn_ks_timing));
1187 	iwn_kstat_free(sc->sc_ks_edca, sc->sc_edca,
1188 	    sizeof (struct iwn_ks_edca));
1189 
1190 	ddi_soft_state_free(iwn_state, instance);
1191 
1192 	return (DDI_FAILURE);
1193 }
1194 
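/*
 * Fill in the ops vector and chip constants for 4965AGN adapters.  The
 * antenna chain masks are hard-coded because the ROM values are known to
 * be broken on this chip.
 */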
1195 int
1196 iwn4965_attach(struct iwn_softc *sc)
1197 {
1198 	struct iwn_ops *ops = &sc->ops;
1199 
1200 	ops->load_firmware = iwn4965_load_firmware;
1201 	ops->read_eeprom = iwn4965_read_eeprom;
1202 	ops->post_alive = iwn4965_post_alive;
1203 	ops->nic_config = iwn4965_nic_config;
1204 	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
1205 	ops->update_sched = iwn4965_update_sched;
1206 	ops->get_temperature = iwn4965_get_temperature;
1207 	ops->get_rssi = iwn4965_get_rssi;
1208 	ops->set_txpower = iwn4965_set_txpower;
1209 	ops->init_gains = iwn4965_init_gains;
1210 	ops->set_gains = iwn4965_set_gains;
1211 	ops->add_node = iwn4965_add_node;
1212 	ops->tx_done = iwn4965_tx_done;
1213 #ifndef IEEE80211_NO_HT
1214 	ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1215 	ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1216 #endif
1217 	sc->ntxqs = IWN4965_NTXQUEUES;
1218 	sc->ndmachnls = IWN4965_NDMACHNLS;
1219 	sc->broadcast_id = IWN4965_ID_BROADCAST;
1220 	sc->rxonsz = IWN4965_RXONSZ;
1221 	sc->schedsz = IWN4965_SCHEDSZ;
1222 	sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1223 	sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1224 	sc->fwsz = IWN4965_FWSZ;
1225 	sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1226 	sc->limits = &iwn4965_sensitivity_limits;
1227 	sc->fwname = "iwlwifi-4965-2.ucode";
1228 	/* Override chains masks, ROM is known to be broken. */
1229 	sc->txchainmask = IWN_ANT_AB;
1230 	sc->rxchainmask = IWN_ANT_ABC;
1231 
1232 	iwn_kstat_create(sc, "txpower", sizeof (struct iwn_ks_txpower),
1233 	    &sc->sc_ks_txpower, (void **)&sc->sc_txpower);
1234 	iwn_kstat_init_4965(sc);
1235 
1236 	return 0;
1237 }
1238 
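/*
 * Fill in the ops vector and chip constants for 5000 Series and later
 * adapters; the firmware file, sensitivity limits and BT coexistence
 * handler are selected from the hardware revision and PCI product ID.
 */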
1239 int
1240 iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1241 {
1242 	struct iwn_ops *ops = &sc->ops;
1243 
1244 	ops->load_firmware = iwn5000_load_firmware;
1245 	ops->read_eeprom = iwn5000_read_eeprom;
1246 	ops->post_alive = iwn5000_post_alive;
1247 	ops->nic_config = iwn5000_nic_config;
1248 	ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
1249 	ops->update_sched = iwn5000_update_sched;
1250 	ops->get_temperature = iwn5000_get_temperature;
1251 	ops->get_rssi = iwn5000_get_rssi;
1252 	ops->set_txpower = iwn5000_set_txpower;
1253 	ops->init_gains = iwn5000_init_gains;
1254 	ops->set_gains = iwn5000_set_gains;
1255 	ops->add_node = iwn5000_add_node;
1256 	ops->tx_done = iwn5000_tx_done;
1257 #ifndef IEEE80211_NO_HT
1258 	ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1259 	ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1260 #endif
1261 	sc->ntxqs = IWN5000_NTXQUEUES;
1262 	sc->ndmachnls = IWN5000_NDMACHNLS;
1263 	sc->broadcast_id = IWN5000_ID_BROADCAST;
1264 	sc->rxonsz = IWN5000_RXONSZ;
1265 	sc->schedsz = IWN5000_SCHEDSZ;
1266 	sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1267 	sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1268 	sc->fwsz = IWN5000_FWSZ;
1269 	sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1270 
1271 	switch (sc->hw_type) {
1272 	case IWN_HW_REV_TYPE_5100:
1273 		sc->limits = &iwn5000_sensitivity_limits;
1274 		sc->fwname = "iwlwifi-5000-2.ucode";
1275 		/* Override chains masks, ROM is known to be broken. */
1276 		sc->txchainmask = IWN_ANT_B;
1277 		sc->rxchainmask = IWN_ANT_AB;
1278 		break;
1279 	case IWN_HW_REV_TYPE_5150:
1280 		sc->limits = &iwn5150_sensitivity_limits;
1281 		sc->fwname = "iwlwifi-5150-2.ucode";
1282 		break;
1283 	case IWN_HW_REV_TYPE_5300:
1284 	case IWN_HW_REV_TYPE_5350:
1285 		sc->limits = &iwn5000_sensitivity_limits;
1286 		sc->fwname = "iwlwifi-5000-2.ucode";
1287 		break;
1288 	case IWN_HW_REV_TYPE_1000:
1289 		sc->limits = &iwn1000_sensitivity_limits;
1290 		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_1 ||
1291 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_100_2)
1292 			sc->fwname = "iwlwifi-100-5.ucode";
1293 		else
1294 			sc->fwname = "iwlwifi-1000-3.ucode";
1295 		break;
1296 	case IWN_HW_REV_TYPE_6000:
1297 		sc->limits = &iwn6000_sensitivity_limits;
1298 		sc->fwname = "iwlwifi-6000-4.ucode";
1299 		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
1300 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
1301 			sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
1302 			/* Override chains masks, ROM is known to be broken. */
1303 			sc->txchainmask = IWN_ANT_BC;
1304 			sc->rxchainmask = IWN_ANT_BC;
1305 		}
1306 		break;
1307 	case IWN_HW_REV_TYPE_6050:
1308 		sc->limits = &iwn6000_sensitivity_limits;
1309 		sc->fwname = "iwlwifi-6050-5.ucode";
1310 		break;
1311 	case IWN_HW_REV_TYPE_6005:
1312 		sc->limits = &iwn6000_sensitivity_limits;
1313 		/* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
1314 		if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
1315 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
1316 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
1317 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
1318 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235   ||
1319 		    pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235_2) {
1320 			sc->fwname = "iwlwifi-6000g2b-6.ucode";
1321 			ops->config_bt_coex = iwn_config_bt_coex_adv1;
1322 		}
1323 		else
1324 			sc->fwname = "iwlwifi-6000g2a-6.ucode";
1325 
1326 		iwn_kstat_create(sc, "temp_offset",
1327 		    sizeof (struct iwn_ks_toff_6000),
1328 		    &sc->sc_ks_toff, (void **)&sc->sc_toff.t6000);
1329 		iwn_kstat_init_6000(sc);
1330 		break;
1331 	case IWN_HW_REV_TYPE_2030:
1332 		sc->limits = &iwn2000_sensitivity_limits;
1333 		sc->fwname = "iwlwifi-2030-6.ucode";
1334 		ops->config_bt_coex = iwn_config_bt_coex_adv2;
1335 
1336 		iwn_kstat_create(sc, "temp_offset",
1337 		    sizeof (struct iwn_ks_toff_2000),
1338 		    &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1339 		iwn_kstat_init_2000(sc);
1340 		break;
1341 	case IWN_HW_REV_TYPE_2000:
1342 		sc->limits = &iwn2000_sensitivity_limits;
1343 		sc->fwname = "iwlwifi-2000-6.ucode";
1344 
1345 		iwn_kstat_create(sc, "temp_offset",
1346 		    sizeof (struct iwn_ks_toff_2000),
1347 		    &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1348 		iwn_kstat_init_2000(sc);
1349 		break;
1350 	case IWN_HW_REV_TYPE_135:
1351 		sc->limits = &iwn2000_sensitivity_limits;
1352 		sc->fwname = "iwlwifi-135-6.ucode";
1353 		ops->config_bt_coex = iwn_config_bt_coex_adv2;
1354 
1355 		iwn_kstat_create(sc, "temp_offset",
1356 		    sizeof (struct iwn_ks_toff_2000),
1357 		    &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1358 		iwn_kstat_init_2000(sc);
1359 		break;
1360 	case IWN_HW_REV_TYPE_105:
1361 		sc->limits = &iwn2000_sensitivity_limits;
1362 		sc->fwname = "iwlwifi-105-6.ucode";
1363 
1364 		iwn_kstat_create(sc, "temp_offset",
1365 		    sizeof (struct iwn_ks_toff_2000),
1366 		    &sc->sc_ks_toff, (void **)&sc->sc_toff.t2000);
1367 		iwn_kstat_init_2000(sc);
1368 		break;
1369 	default:
1370 		dev_err(sc->sc_dip, CE_WARN, "!adapter type %d not supported",
1371 		    sc->hw_type);
1372 		return ENOTSUP;
1373 	}
1374 	return 0;
1375 }
1376 
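/*
 * detach(9E) entry point.  DDI_SUSPEND just stops the hardware;
 * DDI_DETACH additionally cancels timeouts, unregisters from the MAC
 * layer and net80211, and releases interrupts, DMA memory, kstats and
 * register mappings.
 */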
1377 static int
1378 iwn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1379 {
1380 	struct iwn_softc *sc = ddi_get_driver_private(dip);
1381 	ieee80211com_t *ic = &sc->sc_ic;
1382 	int qid, error;
1383 
1384 	switch (cmd) {
1385 	case DDI_DETACH:
1386 		break;
1387 	case DDI_SUSPEND:
1388 		sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
1389 		sc->sc_flags &= ~IWN_FLAG_RATE_AUTO_CTL;
1390 
1391 		sc->sc_flags |= IWN_FLAG_SUSPEND;
1392 
1393 		if (sc->sc_flags & IWN_FLAG_RUNNING) {
1394 			iwn_hw_stop(sc, B_TRUE);
1395 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1396 
1397 		}
1398 
1399 		return (DDI_SUCCESS);
1400 	default:
1401 		return (DDI_FAILURE);
1402 	}
1403 
1404 	if (!(sc->sc_flags & IWN_FLAG_ATTACHED)) {
1405 		return (DDI_FAILURE);
1406 	}
1407 
1408 	error = mac_disable(ic->ic_mach);
1409 	if (error != DDI_SUCCESS)
1410 		return (error);
1411 
1412 	mutex_enter(&sc->sc_mtx);
1413 	sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
1414 	mutex_exit(&sc->sc_mtx);
1415 
1416 	if (sc->calib_to != 0)
1417 		(void) untimeout(sc->calib_to);
1418 	sc->calib_to = 0;
1419 
1420 	if (sc->scan_to != 0)
1421 		(void) untimeout(sc->scan_to);
1422 	sc->scan_to = 0;
1423 
1424 	ddi_periodic_delete(sc->sc_periodic);
1425 
1426 	/*
1427 	 * stop chipset
1428 	 */
1429 	iwn_hw_stop(sc, B_TRUE);
1430 
1431 	/*
1432 	 * Unregister from GLD
1433 	 */
1434 	(void) mac_unregister(ic->ic_mach);
1435 	ieee80211_detach(ic);
1436 
1437 	/* Uninstall interrupt handler. */
1438 	iwn_intr_teardown(sc);
1439 
1440 	/* Free DMA resources. */
1441 	mutex_enter(&sc->sc_mtx);
1442 	iwn_free_rx_ring(sc, &sc->rxq);
1443 	for (qid = 0; qid < sc->ntxqs; qid++)
1444 		iwn_free_tx_ring(sc, &sc->txq[qid]);
1445 	iwn_free_sched(sc);
1446 	iwn_free_kw(sc);
1447 	if (sc->ict != NULL)
1448 		iwn_free_ict(sc);
1449 	iwn_free_fwmem(sc);
1450 	mutex_exit(&sc->sc_mtx);
1451 
1452 	iwn_kstat_free(sc->sc_ks_misc, sc->sc_misc,
1453 	    sizeof (struct iwn_ks_misc));
1454 	iwn_kstat_free(sc->sc_ks_ant, sc->sc_ant,
1455 	    sizeof (struct iwn_ks_ant));
1456 	iwn_kstat_free(sc->sc_ks_sens, sc->sc_sens,
1457 	    sizeof (struct iwn_ks_sens));
1458 	iwn_kstat_free(sc->sc_ks_timing, sc->sc_timing,
1459 	    sizeof (struct iwn_ks_timing));
1460 	iwn_kstat_free(sc->sc_ks_edca, sc->sc_edca,
1461 	    sizeof (struct iwn_ks_edca));
1462 	iwn_kstat_free(sc->sc_ks_txpower, sc->sc_txpower,
1463 	    sizeof (struct iwn_ks_txpower));
1464 
1465 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
1466 		iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t6000,
1467 		    sizeof (struct iwn_ks_toff_6000));
1468 	else
1469 		iwn_kstat_free(sc->sc_ks_toff, sc->sc_toff.t2000,
1470 		    sizeof (struct iwn_ks_toff_2000));
1471 
1472 	ddi_regs_map_free(&sc->sc_regh);
1473 	pci_config_teardown(&sc->sc_pcih);
1474 	ddi_remove_minor_node(dip, NULL);
1475 	ddi_soft_state_free(iwn_state, ddi_get_instance(dip));
1476 
1477 	return 0;
1478 }
1479 
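/*
 * quiesce(9E) entry point, called for a fast reboot.  We may not block
 * here, so just flag the driver as quiesced and silence the hardware.
 */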
1480 static int
1481 iwn_quiesce(dev_info_t *dip)
1482 {
1483 	struct iwn_softc *sc;
1484 
1485 	sc = ddi_get_soft_state(iwn_state, ddi_get_instance(dip));
1486 	if (sc == NULL)
1487 		return (DDI_FAILURE);
1488 
1489 #ifdef IWN_DEBUG
1490 	/* bypass any messages */
1491 	iwn_dbg_print = 0;
1492 #endif
1493 
1494 	/*
1495 	 * No more blocking is allowed while we are in the
1496 	 * quiesce(9E) entry point.
1497 	 */
1498 	sc->sc_flags |= IWN_FLAG_QUIESCED;
1499 
1500 	/*
1501 	 * Disable and mask all interrupts.
1502 	 */
1503 	iwn_hw_stop(sc, B_FALSE);
1504 
1505 	return (DDI_SUCCESS);
1506 }
1507 
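/*
 * Request exclusive ("MAC access") ownership of the NIC registers and
 * spin for up to 10ms until the hardware grants it; iwn_nic_unlock()
 * drops the request again.
 */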
1508 static int
1509 iwn_nic_lock(struct iwn_softc *sc)
1510 {
1511 	int ntries;
1512 
1513 	/* Request exclusive access to NIC. */
1514 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1515 
1516 	/* Spin until we actually get the lock. */
1517 	for (ntries = 0; ntries < 1000; ntries++) {
1518 		if ((IWN_READ(sc, IWN_GP_CNTRL) &
1519 		     (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1520 		    IWN_GP_CNTRL_MAC_ACCESS_ENA)
1521 			return 0;
1522 		DELAY(10);
1523 	}
1524 	return ETIMEDOUT;
1525 }
1526 
1527 static __inline void
1528 iwn_nic_unlock(struct iwn_softc *sc)
1529 {
1530 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1531 }
1532 
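/*
 * The following inline helpers access peripheral (PRPH) registers and
 * internal memory indirectly, by writing the target address to an
 * address register and then transferring the data through the matching
 * data register.
 */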
1533 static __inline uint32_t
1534 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1535 {
1536 	IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1537 	IWN_BARRIER_READ_WRITE(sc);
1538 	return IWN_READ(sc, IWN_PRPH_RDATA);
1539 }
1540 
1541 static __inline void
1542 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1543 {
1544 	IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1545 	IWN_BARRIER_WRITE(sc);
1546 	IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1547 }
1548 
1549 static __inline void
1550 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1551 {
1552 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1553 }
1554 
1555 static __inline void
1556 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1557 {
1558 	iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1559 }
1560 
1561 static __inline void
1562 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1563     const uint32_t *data, int count)
1564 {
1565 	for (; count > 0; count--, data++, addr += 4)
1566 		iwn_prph_write(sc, addr, *data);
1567 }
1568 
1569 static __inline uint32_t
1570 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1571 {
1572 	IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1573 	IWN_BARRIER_READ_WRITE(sc);
1574 	return IWN_READ(sc, IWN_MEM_RDATA);
1575 }
1576 
1577 static __inline void
1578 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1579 {
1580 	IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1581 	IWN_BARRIER_WRITE(sc);
1582 	IWN_WRITE(sc, IWN_MEM_WDATA, data);
1583 }
1584 
1585 #ifndef IEEE80211_NO_HT
1586 static __inline void
1587 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1588 {
1589 	uint32_t tmp;
1590 
1591 	tmp = iwn_mem_read(sc, addr & ~3);
1592 	if (addr & 3)
1593 		tmp = (tmp & 0x0000ffff) | data << 16;
1594 	else
1595 		tmp = (tmp & 0xffff0000) | data;
1596 	iwn_mem_write(sc, addr & ~3, tmp);
1597 }
1598 #endif
1599 
1600 static __inline void
1601 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1602     int count)
1603 {
1604 	for (; count > 0; count--, addr += 4)
1605 		*data++ = iwn_mem_read(sc, addr);
1606 }
1607 
1608 static __inline void
1609 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1610     int count)
1611 {
1612 	for (; count > 0; count--, addr += 4)
1613 		iwn_mem_write(sc, addr, val);
1614 }
1615 
1616 static int
1617 iwn_eeprom_lock(struct iwn_softc *sc)
1618 {
1619 	int i, ntries;
1620 
1621 	for (i = 0; i < 100; i++) {
1622 		/* Request exclusive access to EEPROM. */
1623 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1624 		    IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1625 
1626 		/* Spin until we actually get the lock. */
1627 		for (ntries = 0; ntries < 100; ntries++) {
1628 			if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1629 			    IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1630 				return 0;
1631 			DELAY(10);
1632 		}
1633 	}
1634 	return ETIMEDOUT;
1635 }
1636 
1637 static __inline void
1638 iwn_eeprom_unlock(struct iwn_softc *sc)
1639 {
1640 	IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1641 }
1642 
1643 /*
1644  * Initialize access by host to One Time Programmable ROM.
1645  * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1646  */
1647 static int
1648 iwn_init_otprom(struct iwn_softc *sc)
1649 {
1650 	uint16_t prev = 0, base, next;
1651 	int count, error;
1652 
1653 	/* Wait for clock stabilization before accessing prph. */
1654 	if ((error = iwn_clock_wait(sc)) != 0)
1655 		return error;
1656 
1657 	if ((error = iwn_nic_lock(sc)) != 0)
1658 		return error;
1659 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1660 	DELAY(5);
1661 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1662 	iwn_nic_unlock(sc);
1663 
1664 	/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1665 	if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1666 		IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1667 		    IWN_RESET_LINK_PWR_MGMT_DIS);
1668 	}
1669 	IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1670 	/* Clear ECC status. */
1671 	IWN_SETBITS(sc, IWN_OTP_GP,
1672 	    IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1673 
1674 	/*
1675 	 * Find the block before the last block (it contains the EEPROM image)
1676 	 * for HW without OTP shadow RAM.
1677 	 */
1678 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1679 		/* Switch to absolute addressing mode. */
1680 		IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1681 		base = 0;
1682 		for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1683 			error = iwn_read_prom_data(sc, base, &next, 2);
1684 			if (error != 0)
1685 				return error;
1686 			if (next == 0)	/* End of linked-list. */
1687 				break;
1688 			prev = base;
1689 			base = le16toh(next);
1690 		}
1691 		if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1692 			return EIO;
1693 		/* Skip "next" word. */
1694 		sc->prom_base = prev + 1;
1695 	}
1696 	return 0;
1697 }
1698 
1699 static int
1700 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1701 {
1702 	uint8_t *out = data;
1703 	uint32_t val, tmp;
1704 	int ntries;
1705 
1706 	addr += sc->prom_base;
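	/*
	 * The ROM is read one 16-bit word at a time: the word address is
	 * written to IWN_EEPROM (shifted left by two bits) and, once
	 * READ_VALID is set, the data is returned in bits 16-31 of the
	 * same register.
	 */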
1707 	for (; count > 0; count -= 2, addr++) {
1708 		IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1709 		for (ntries = 0; ntries < 10; ntries++) {
1710 			val = IWN_READ(sc, IWN_EEPROM);
1711 			if (val & IWN_EEPROM_READ_VALID)
1712 				break;
1713 			DELAY(5);
1714 		}
1715 		if (ntries == 10) {
1716 			dev_err(sc->sc_dip, CE_WARN,
1717 			    "!timeout reading ROM at 0x%x", addr);
1718 			return ETIMEDOUT;
1719 		}
1720 		if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1721 			/* OTPROM, check for ECC errors. */
1722 			tmp = IWN_READ(sc, IWN_OTP_GP);
1723 			if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1724 				dev_err(sc->sc_dip, CE_WARN,
1725 				    "!OTPROM ECC error at 0x%x", addr);
1726 				return EIO;
1727 			}
1728 			if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1729 				/* Correctable ECC error, clear bit. */
1730 				IWN_SETBITS(sc, IWN_OTP_GP,
1731 				    IWN_OTP_GP_ECC_CORR_STTS);
1732 			}
1733 		}
1734 		*out++ = val >> 16;
1735 		if (count > 1)
1736 			*out++ = val >> 24;
1737 	}
1738 	return 0;
1739 }
1740 
1741 static int
1742 iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1743     uint_t size, uint_t flags, void **kvap, ddi_device_acc_attr_t *acc_attr,
1744     uint_t align)
1745 {
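	/* Single-cookie DMA allocations with a 36-bit address limit. */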
1746 	ddi_dma_attr_t dma_attr = {
1747 		.dma_attr_version	= DMA_ATTR_V0,
1748 		.dma_attr_addr_lo	= 0,
1749 		.dma_attr_addr_hi	= 0xfffffffffULL,
1750 		.dma_attr_count_max	= 0xfffffffffULL,
1751 		.dma_attr_align		= align,
1752 		.dma_attr_burstsizes	= 0x7ff,
1753 		.dma_attr_minxfer	= 1,
1754 		.dma_attr_maxxfer	= 0xfffffffffULL,
1755 		.dma_attr_seg		= 0xfffffffffULL,
1756 		.dma_attr_sgllen	= 1,
1757 		.dma_attr_granular	= 1,
1758 		.dma_attr_flags		= 0,
1759 	};
1760 	int error;
1761 
1762 	error = ddi_dma_alloc_handle(sc->sc_dip, &dma_attr, DDI_DMA_SLEEP, NULL,
1763 	    &dma->dma_hdl);
1764 	if (error != DDI_SUCCESS) {
1765 		dev_err(sc->sc_dip, CE_WARN,
1766 		    "ddi_dma_alloc_handle() failed, error = %d", error);
1767 		goto fail;
1768 	}
1769 
1770 	error = ddi_dma_mem_alloc(dma->dma_hdl, size, acc_attr,
1771 	    flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING), DDI_DMA_SLEEP, 0,
1772 	    &dma->vaddr, &dma->length, &dma->acc_hdl);
1773 	if (error != DDI_SUCCESS) {
1774 		dev_err(sc->sc_dip, CE_WARN,
1775 		    "ddi_dma_mem_alloc() failed, error = %d", error);
1776 		goto fail2;
1777 	}
1778 
1779 	bzero(dma->vaddr, dma->length);
1780 
1781 	error = ddi_dma_addr_bind_handle(dma->dma_hdl, NULL, dma->vaddr,
1782 	    dma->length, flags, DDI_DMA_SLEEP, NULL, &dma->cookie,
1783 	    &dma->ncookies);
1784 	if (error != DDI_DMA_MAPPED) {
1785 		dma->ncookies = 0;
1786 		dev_err(sc->sc_dip, CE_WARN,
1787 		    "ddi_dma_addr_bind_handle() failed, error = %d", error);
1788 		goto fail3;
1789 	}
1790 
1791 	dma->size = size;
1792 	dma->paddr = dma->cookie.dmac_laddress;
1793 
1794 	if (kvap != NULL)
1795 		*kvap = (void *)dma->vaddr;
1796 
1797 	return (DDI_SUCCESS);
1798 
1799 fail3:
1800 	ddi_dma_mem_free(&dma->acc_hdl);
1801 fail2:
1802 	ddi_dma_free_handle(&dma->dma_hdl);
1803 fail:
1804 	bzero(dma, sizeof (struct iwn_dma_info));
1805 	return (DDI_FAILURE);
1806 }
1807 
1808 static void
1809 iwn_dma_contig_free(struct iwn_dma_info *dma)
1810 {
1811 	if (dma->dma_hdl != NULL) {
1812 		if (dma->ncookies)
1813 			(void) ddi_dma_unbind_handle(dma->dma_hdl);
1814 		ddi_dma_free_handle(&dma->dma_hdl);
1815 	}
1816 
1817 	if (dma->acc_hdl != NULL)
1818 		ddi_dma_mem_free(&dma->acc_hdl);
1819 
1820 	bzero(dma, sizeof (struct iwn_dma_info));
1821 }
1822 
1823 static int
1824 iwn_alloc_sched(struct iwn_softc *sc)
1825 {
1826 	/* TX scheduler rings must be aligned on a 1KB boundary. */
1827 
1828 	return iwn_dma_contig_alloc(sc, &sc->sched_dma, sc->schedsz,
1829 	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&sc->sched,
1830 	    &iwn_dma_accattr, 1024);
1831 }
1832 
1833 static void
1834 iwn_free_sched(struct iwn_softc *sc)
1835 {
1836 	iwn_dma_contig_free(&sc->sched_dma);
1837 }
1838 
1839 static int
1840 iwn_alloc_kw(struct iwn_softc *sc)
1841 {
1842 	/* "Keep Warm" page must be aligned on a 4KB boundary. */
1843 
1844 	return iwn_dma_contig_alloc(sc, &sc->kw_dma, IWN_KW_SIZE,
1845 	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, NULL, &iwn_dma_accattr, 4096);
1846 }
1847 
1848 static void
1849 iwn_free_kw(struct iwn_softc *sc)
1850 {
1851 	iwn_dma_contig_free(&sc->kw_dma);
1852 }
1853 
1854 static int
1855 iwn_alloc_ict(struct iwn_softc *sc)
1856 {
1857 	/* ICT table must be aligned on a 4KB boundary. */
1858 
1859 	return iwn_dma_contig_alloc(sc, &sc->ict_dma, IWN_ICT_SIZE,
1860 	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&sc->ict,
1861 	    &iwn_dma_descattr, 4096);
1862 }
1863 
1864 static void
1865 iwn_free_ict(struct iwn_softc *sc)
1866 {
1867 	iwn_dma_contig_free(&sc->ict_dma);
1868 }
1869 
1870 static int
1871 iwn_alloc_fwmem(struct iwn_softc *sc)
1872 {
1873 	/* Must be aligned on a 16-byte boundary. */
1874 	return iwn_dma_contig_alloc(sc, &sc->fw_dma, sc->fwsz,
1875 	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, NULL, &iwn_dma_accattr, 16);
1876 }
1877 
1878 static void
1879 iwn_free_fwmem(struct iwn_softc *sc)
1880 {
1881 	iwn_dma_contig_free(&sc->fw_dma);
1882 }
1883 
1884 static int
1885 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1886 {
1887 	size_t size;
1888 	int i, error;
1889 
1890 	ring->cur = 0;
1891 
1892 	/* Allocate RX descriptors (256-byte aligned). */
1893 	size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1894 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, size,
1895 	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&ring->desc,
1896 	    &iwn_dma_descattr, 256);
1897 	if (error != DDI_SUCCESS) {
1898 		dev_err(sc->sc_dip, CE_WARN,
1899 		    "!could not allocate RX ring DMA memory");
1900 		goto fail;
1901 	}
1902 
1903 	/* Allocate RX status area (16-byte aligned). */
1904 	error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1905 	    sizeof (struct iwn_rx_status), DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
1906 	    (void **)&ring->stat, &iwn_dma_descattr, 16);
1907 	if (error != DDI_SUCCESS) {
1908 		dev_err(sc->sc_dip, CE_WARN,
1909 		    "!could not allocate RX status DMA memory");
1910 		goto fail;
1911 	}
1912 
1913 	/*
1914 	 * Allocate and map RX buffers.
1915 	 */
1916 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1917 		struct iwn_rx_data *data = &ring->data[i];
1918 
1919 		error = iwn_dma_contig_alloc(sc, &data->dma_data, IWN_RBUF_SIZE,
1920 		    DDI_DMA_CONSISTENT | DDI_DMA_READ, NULL, &iwn_dma_accattr,
1921 		    256);
1922 		if (error != DDI_SUCCESS) {
1923 			dev_err(sc->sc_dip, CE_WARN,
1924 			    "!could not create RX buf DMA map");
1925 			goto fail;
1926 		}
1927 
1928 		/* Set physical address of RX buffer (256-byte aligned). */
1929 		ring->desc[i] = htole32(data->dma_data.paddr >> 8);
1930 	}
1931 
1932 	(void) ddi_dma_sync(ring->desc_dma.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
1933 
1934 	return 0;
1935 
1936 fail:	iwn_free_rx_ring(sc, ring);
1937 	return error;
1938 }
1939 
1940 static void
1941 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1942 {
1943 	int ntries;
1944 
1945 	if (iwn_nic_lock(sc) == 0) {
1946 		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1947 		for (ntries = 0; ntries < 1000; ntries++) {
1948 			if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1949 			    IWN_FH_RX_STATUS_IDLE)
1950 				break;
1951 			DELAY(10);
1952 		}
1953 		iwn_nic_unlock(sc);
1954 	}
1955 	ring->cur = 0;
1956 	sc->last_rx_valid = 0;
1957 }
1958 
1959 static void
1960 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1961 {
1962 	_NOTE(ARGUNUSED(sc));
1963 	int i;
1964 
1965 	iwn_dma_contig_free(&ring->desc_dma);
1966 	iwn_dma_contig_free(&ring->stat_dma);
1967 
1968 	for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1969 		struct iwn_rx_data *data = &ring->data[i];
1970 
1971 		if (data->dma_data.dma_hdl)
1972 			iwn_dma_contig_free(&data->dma_data);
1973 	}
1974 }
1975 
1976 static int
1977 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1978 {
1979 	uintptr_t paddr;
1980 	size_t size;
1981 	int i, error;
1982 
1983 	ring->qid = qid;
1984 	ring->queued = 0;
1985 	ring->cur = 0;
1986 
1987 	/* Allocate TX descriptors (256-byte aligned). */
1988 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1989 	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, size,
1990 	    DDI_DMA_CONSISTENT | DDI_DMA_WRITE, (void **)&ring->desc,
1991 	    &iwn_dma_descattr, 256);
1992 	if (error != DDI_SUCCESS) {
1993 		dev_err(sc->sc_dip, CE_WARN,
1994 		    "!could not allocate TX ring DMA memory");
1995 		goto fail;
1996 	}
1997 	/*
1998 	 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1999 	 * to allocate command space for the other rings.
2000 	 * XXX Do we really need to allocate descriptors for other rings?
2001 	 */
2002 	if (qid > 4)
2003 		return 0;
2004 
2005 	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
2006 	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, size,
2007 	    DDI_DMA_CONSISTENT | DDI_DMA_WRITE, (void **)&ring->cmd,
2008 	    &iwn_dma_accattr, 4);
2009 	if (error != DDI_SUCCESS) {
2010 		dev_err(sc->sc_dip, CE_WARN,
2011 		    "!could not allocate TX cmd DMA memory");
2012 		goto fail;
2013 	}
2014 
2015 	paddr = ring->cmd_dma.paddr;
2016 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2017 		struct iwn_tx_data *data = &ring->data[i];
2018 
2019 		data->cmd_paddr = paddr;
2020 		data->scratch_paddr = paddr + 12;
2021 		paddr += sizeof (struct iwn_tx_cmd);
2022 
2023 		error = iwn_dma_contig_alloc(sc, &data->dma_data, IWN_TBUF_SIZE,
2024 		    DDI_DMA_CONSISTENT | DDI_DMA_WRITE, NULL, &iwn_dma_accattr,
2025 		    256);
2026 		if (error != DDI_SUCCESS) {
2027 			dev_err(sc->sc_dip, CE_WARN,
2028 			    "!could not create TX buf DMA map");
2029 			goto fail;
2030 		}
2031 	}
2032 	return 0;
2033 
2034 fail:	iwn_free_tx_ring(sc, ring);
2035 	return error;
2036 }
2037 
2038 static void
2039 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2040 {
2041 	int i;
2042 
2043 	if (ring->qid < 4)
2044 		for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2045 			struct iwn_tx_data *data = &ring->data[i];
2046 
2047 			(void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0,
2048 			    DDI_DMA_SYNC_FORDEV);
2049 		}
2050 
2051 	/* Clear TX descriptors. */
2052 	memset(ring->desc, 0, ring->desc_dma.size);
2053 	(void) ddi_dma_sync(ring->desc_dma.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
2054 	sc->qfullmsk &= ~(1 << ring->qid);
2055 	ring->queued = 0;
2056 	ring->cur = 0;
2057 }
2058 
2059 static void
2060 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2061 {
2062 	_NOTE(ARGUNUSED(sc));
2063 	int i;
2064 
2065 	iwn_dma_contig_free(&ring->desc_dma);
2066 	iwn_dma_contig_free(&ring->cmd_dma);
2067 
2068 	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2069 		struct iwn_tx_data *data = &ring->data[i];
2070 
2071 		if (data->dma_data.dma_hdl)
2072 			iwn_dma_contig_free(&data->dma_data);
2073 	}
2074 }
2075 
2076 static void
2077 iwn5000_ict_reset(struct iwn_softc *sc)
2078 {
2079 	/* Disable interrupts. */
2080 	IWN_WRITE(sc, IWN_INT_MASK, 0);
2081 
2082 	/* Reset ICT table. */
2083 	memset(sc->ict, 0, IWN_ICT_SIZE);
2084 	sc->ict_cur = 0;
2085 
2086 	/* Set physical address of ICT table (4KB aligned). */
2087 	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2088 	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
2089 
2090 	/* Enable periodic RX interrupt. */
2091 	sc->int_mask |= IWN_INT_RX_PERIODIC;
2092 	/* Switch to ICT interrupt mode in driver. */
2093 	sc->sc_flags |= IWN_FLAG_USE_ICT;
2094 
2095 	/* Re-enable interrupts. */
2096 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
2097 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2098 }
2099 
2100 static int
2101 iwn_read_eeprom(struct iwn_softc *sc)
2102 {
2103 	struct iwn_ops *ops = &sc->ops;
2104 	struct ieee80211com *ic = &sc->sc_ic;
2105 	uint16_t val;
2106 	int error;
2107 
2108 	/* Check whether adapter has an EEPROM or an OTPROM. */
2109 	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2110 	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2111 		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2112 	IWN_DBG("%s found",
2113 	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2114 
2115 	/* Adapter has to be powered on for EEPROM access to work. */
2116 	if ((error = iwn_apm_init(sc)) != 0) {
2117 		dev_err(sc->sc_dip, CE_WARN,
2118 		    "!could not power ON adapter");
2119 		return error;
2120 	}
2121 
2122 	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2123 		dev_err(sc->sc_dip, CE_WARN,
2124 		    "!bad ROM signature");
2125 		return EIO;
2126 	}
2127 	if ((error = iwn_eeprom_lock(sc)) != 0) {
2128 		dev_err(sc->sc_dip, CE_WARN,
2129 		    "!could not lock ROM (error=%d)", error);
2130 		return error;
2131 	}
2132 	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2133 		if ((error = iwn_init_otprom(sc)) != 0) {
2134 			dev_err(sc->sc_dip, CE_WARN,
2135 			    "!could not initialize OTPROM");
2136 			return error;
2137 		}
2138 	}
2139 
2140 	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2141 	IWN_DBG("SKU capabilities=0x%04x", le16toh(val));
2142 	/* Check if HT support is bonded out. */
2143 	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2144 		sc->sc_flags |= IWN_FLAG_HAS_11N;
2145 
2146 	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2147 	sc->rfcfg = le16toh(val);
2148 	IWN_DBG("radio config=0x%04x", sc->rfcfg);
2149 	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
2150 	if (sc->txchainmask == 0)
2151 		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2152 	if (sc->rxchainmask == 0)
2153 		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2154 
2155 	/* Read MAC address. */
2156 	iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_macaddr, 6);
2157 
2158 	/* Read adapter-specific information from EEPROM. */
2159 	ops->read_eeprom(sc);
2160 
2161 	iwn_apm_stop(sc);	/* Power OFF adapter. */
2162 
2163 	iwn_eeprom_unlock(sc);
2164 	return 0;
2165 }
2166 
2167 static void
2168 iwn4965_read_eeprom(struct iwn_softc *sc)
2169 {
2170 	uint32_t addr;
2171 	uint16_t val;
2172 	int i;
2173 
2174 	/* Read regulatory domain (4 ASCII characters). */
2175 	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2176 
2177 	/* Read the list of authorized channels (20MHz ones only). */
2178 	for (i = 0; i < 5; i++) {
2179 		addr = iwn4965_regulatory_bands[i];
2180 		iwn_read_eeprom_channels(sc, i, addr);
2181 	}
2182 
2183 	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2184 	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2185 	sc->maxpwr2GHz = val & 0xff;
2186 	sc->maxpwr5GHz = val >> 8;
2187 	/* Check that EEPROM values are within valid range. */
2188 	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2189 		sc->maxpwr5GHz = 38;
2190 	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2191 		sc->maxpwr2GHz = 38;
2192 	IWN_DBG("maxpwr 2GHz=%d 5GHz=%d", sc->maxpwr2GHz, sc->maxpwr5GHz);
2193 
2194 	/* Read samples for each TX power group. */
2195 	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2196 	    sizeof sc->bands);
2197 
2198 	/* Read voltage at which samples were taken. */
2199 	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2200 	sc->eeprom_voltage = (int16_t)le16toh(val);
2201 	IWN_DBG("voltage=%d (in 0.3V)", sc->eeprom_voltage);
2202 
2203 #ifdef IWN_DEBUG
2204 	/* Print samples. */
2205 	if (iwn_dbg_print != 0) {
2206 		for (i = 0; i < IWN_NBANDS; i++)
2207 			iwn4965_print_power_group(sc, i);
2208 	}
2209 #endif
2210 }
2211 
2212 #ifdef IWN_DEBUG
2213 static void
2214 iwn4965_print_power_group(struct iwn_softc *sc, int i)
2215 {
2216 	struct iwn4965_eeprom_band *band = &sc->bands[i];
2217 	struct iwn4965_eeprom_chan_samples *chans = band->chans;
2218 	int j, c;
2219 
2220 	dev_err(sc->sc_dip, CE_CONT, "!===band %d===", i);
2221 	dev_err(sc->sc_dip, CE_CONT, "!chan lo=%d, chan hi=%d", band->lo,
2222 	    band->hi);
2223 	dev_err(sc->sc_dip, CE_CONT,  "!chan1 num=%d", chans[0].num);
2224 	for (c = 0; c < 2; c++) {
2225 		for (j = 0; j < IWN_NSAMPLES; j++) {
2226 			dev_err(sc->sc_dip, CE_CONT, "!chain %d, sample %d: "
2227 			    "temp=%d gain=%d power=%d pa_det=%d", c, j,
2228 			    chans[0].samples[c][j].temp,
2229 			    chans[0].samples[c][j].gain,
2230 			    chans[0].samples[c][j].power,
2231 			    chans[0].samples[c][j].pa_det);
2232 		}
2233 	}
2234 	dev_err(sc->sc_dip, CE_CONT, "!chan2 num=%d", chans[1].num);
2235 	for (c = 0; c < 2; c++) {
2236 		for (j = 0; j < IWN_NSAMPLES; j++) {
2237 			dev_err(sc->sc_dip, CE_CONT, "!chain %d, sample %d: "
2238 			    "temp=%d gain=%d power=%d pa_det=%d", c, j,
2239 			    chans[1].samples[c][j].temp,
2240 			    chans[1].samples[c][j].gain,
2241 			    chans[1].samples[c][j].power,
2242 			    chans[1].samples[c][j].pa_det);
2243 		}
2244 	}
2245 }
2246 #endif
2247 
2248 static void
2249 iwn5000_read_eeprom(struct iwn_softc *sc)
2250 {
2251 	struct iwn5000_eeprom_calib_hdr hdr;
2252 	int32_t volt;
2253 	uint32_t base, addr;
2254 	uint16_t val;
2255 	int i;
2256 
2257 	/* Read regulatory domain (4 ASCII characters). */
2258 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2259 	base = le16toh(val);
2260 	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2261 	    sc->eeprom_domain, 4);
2262 
2263 	/* Read the list of authorized channels (20MHz ones only). */
2264 	for (i = 0; i < 5; i++) {
2265 		addr = base + iwn5000_regulatory_bands[i];
2266 		iwn_read_eeprom_channels(sc, i, addr);
2267 	}
2268 
2269 	/* Read enhanced TX power information for 6000 Series. */
2270 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
2271 		iwn_read_eeprom_enhinfo(sc);
2272 
2273 	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2274 	base = le16toh(val);
2275 	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2276 	IWN_DBG("calib version=%u pa type=%u voltage=%u",
2277 	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
2278 	sc->calib_ver = hdr.version;
2279 
2280 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2281 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2282 	    sc->hw_type == IWN_HW_REV_TYPE_135  ||
2283 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
2284 		sc->eeprom_voltage = le16toh(hdr.volt);
2285 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2286 		sc->eeprom_temp = le16toh(val);
2287 		iwn_read_prom_data(sc, base + IWN2000_EEPROM_RAWTEMP, &val, 2);
2288 		sc->eeprom_rawtemp = le16toh(val);
2289 	}
2290 
2291 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2292 		/* Compute temperature offset. */
2293 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2294 		sc->eeprom_temp = le16toh(val);
2295 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2296 		volt = le16toh(val);
2297 		sc->temp_off = sc->eeprom_temp - (volt / -5);
2298 		IWN_DBG("temp=%d volt=%d offset=%dK",
2299 		    sc->eeprom_temp, volt, sc->temp_off);
2300 	} else {
2301 		/* Read crystal calibration. */
2302 		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2303 		    &sc->eeprom_crystal, sizeof (uint32_t));
2304 		IWN_DBG("crystal calibration 0x%08x",
2305 		    le32toh(sc->eeprom_crystal));
2306 	}
2307 }
2308 
2309 static void
2310 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2311 {
2312 	struct ieee80211com *ic = &sc->sc_ic;
2313 	const struct iwn_chan_band *band = &iwn_bands[n];
2314 	struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
2315 	uint8_t chan;
2316 	int i;
2317 
2318 	iwn_read_prom_data(sc, addr, channels,
2319 	    band->nchan * sizeof (struct iwn_eeprom_chan));
2320 
2321 	for (i = 0; i < band->nchan; i++) {
2322 		if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
2323 			continue;
2324 
2325 		chan = band->chan[i];
2326 
2327 		if (n == 0) {	/* 2GHz band */
2328 			ic->ic_sup_channels[chan].ich_freq =
2329 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
2330 			ic->ic_sup_channels[chan].ich_flags =
2331 			    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
2332 			    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
2333 
2334 		} else {	/* 5GHz band */
2335 			/*
2336 			 * Some adapters support channels 7, 8, 11 and 12
2337 			 * both in the 2GHz and 4.9GHz bands.
2338 			 * Because of limitations in our net80211 layer,
2339 			 * we don't support them in the 4.9GHz band.
2340 			 */
2341 			if (chan <= 14)
2342 				continue;
2343 
2344 			ic->ic_sup_channels[chan].ich_freq =
2345 			    ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
2346 			ic->ic_sup_channels[chan].ich_flags =
2347 			    IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM;
2348 			/* We have at least one valid 5GHz channel. */
2349 			sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
2350 		}
2351 
2352 		/* Is active scan allowed on this channel? */
2353 		if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
2354 			ic->ic_sup_channels[chan].ich_flags |=
2355 			    IEEE80211_CHAN_PASSIVE;
2356 		}
2357 
2358 		/* Save maximum allowed TX power for this channel. */
2359 		sc->maxpwr[chan] = channels[i].maxpwr;
2360 
2361 		IWN_DBG("adding chan %d flags=0x%x maxpwr=%d",
2362 		    chan, channels[i].flags, sc->maxpwr[chan]);
2363 	}
2364 }
2365 
2366 static void
2367 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2368 {
2369 	struct iwn_eeprom_enhinfo enhinfo[35];
2370 	uint16_t val, base;
2371 	int8_t maxpwr;
2372 	int i;
2373 
2374 	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2375 	base = le16toh(val);
2376 	iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2377 	    enhinfo, sizeof enhinfo);
2378 
2379 	memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
2380 	for (i = 0; i < __arraycount(enhinfo); i++) {
2381 		if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
2382 			continue;	/* Skip invalid entries. */
2383 
2384 		maxpwr = 0;
2385 		if (sc->txchainmask & IWN_ANT_A)
2386 			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2387 		if (sc->txchainmask & IWN_ANT_B)
2388 			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2389 		if (sc->txchainmask & IWN_ANT_C)
2390 			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2391 		if (sc->ntxchains == 2)
2392 			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2393 		else if (sc->ntxchains == 3)
2394 			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2395 		maxpwr /= 2;	/* Convert half-dBm to dBm. */
2396 
2397 		IWN_DBG("enhinfo %d, maxpwr=%d", i, maxpwr);
2398 		sc->enh_maxpwr[i] = maxpwr;
2399 	}
2400 }
2401 
2402 static struct ieee80211_node *
2403 iwn_node_alloc(ieee80211com_t *ic)
2404 {
2405 	_NOTE(ARGUNUSED(ic));
2406 	return (kmem_zalloc(sizeof (struct iwn_node), KM_NOSLEEP));
2407 }
2408 
2409 static void
2410 iwn_node_free(ieee80211_node_t *in)
2411 {
2412 	ASSERT(in != NULL);
2413 	ASSERT(in->in_ic != NULL);
2414 
2415 	if (in->in_wpa_ie != NULL)
2416 		ieee80211_free(in->in_wpa_ie);
2417 
2418 	if (in->in_wme_ie != NULL)
2419 		ieee80211_free(in->in_wme_ie);
2420 
2421 	if (in->in_htcap_ie != NULL)
2422 		ieee80211_free(in->in_htcap_ie);
2423 
2424 	kmem_free(in, sizeof (struct iwn_node));
2425 }
2426 
2427 static void
2428 iwn_newassoc(struct ieee80211_node *ni, int isnew)
2429 {
2430 	_NOTE(ARGUNUSED(isnew));
2431 	struct iwn_softc *sc = (struct iwn_softc *)&ni->in_ic;
2432 	struct iwn_node *wn = (void *)ni;
2433 	uint8_t rate, ridx;
2434 	int i;
2435 
2436 	ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
2437 	/*
2438 	 * Select a medium rate and depend on AMRR to raise/lower it.
2439 	 */
2440 	ni->in_txrate = ni->in_rates.ir_nrates / 2;
2441 
2442 	for (i = 0; i < ni->in_rates.ir_nrates; i++) {
2443 		rate = ni->in_rates.ir_rates[i] & IEEE80211_RATE_VAL;
2444 		/* Map 802.11 rate to HW rate index. */
2445 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
2446 			if (iwn_rates[ridx].rate == rate)
2447 				break;
2448 		wn->ridx[i] = ridx;
2449 	}
2450 }
2451 
2452 static int
2453 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
2454 {
2455 	struct iwn_softc *sc = (struct iwn_softc *)ic;
2456 	enum ieee80211_state ostate;
2457 	int error;
2458 
2459 	mutex_enter(&sc->sc_mtx);
2460 	sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
2461 	mutex_exit(&sc->sc_mtx);
2462 
2463 	(void) untimeout(sc->calib_to);
2464 	sc->calib_to = 0;
2465 
2466 	mutex_enter(&sc->sc_mtx);
2467 	ostate = ic->ic_state;
2468 
2469 	DTRACE_PROBE5(new__state, int, sc->sc_flags,
2470 	    enum ieee80211_state, ostate,
2471 	    const char *, ieee80211_state_name[ostate],
2472 	    enum ieee80211_state, nstate,
2473 	    const char *, ieee80211_state_name[nstate]);
2474 
2475 	if ((sc->sc_flags & IWN_FLAG_RADIO_OFF) && nstate != IEEE80211_S_INIT) {
2476 		mutex_exit(&sc->sc_mtx);
2477 		return (IWN_FAIL);
2478 	}
2479 
2480 	if (!(sc->sc_flags & IWN_FLAG_HW_INITED) &&
2481 	    nstate != IEEE80211_S_INIT) {
2482 		mutex_exit(&sc->sc_mtx);
2483 		return (IWN_FAIL);
2484 	}
2485 
2486 	switch (nstate) {
2487 	case IEEE80211_S_SCAN:
2488 		/* XXX Do not abort a running scan. */
2489 		if (sc->sc_flags & IWN_FLAG_SCANNING) {
2490 			if (ostate != nstate)
2491 				dev_err(sc->sc_dip, CE_WARN, "!scan request(%d)"
2492 				    " while scanning(%d) ignored", nstate,
2493 				    ostate);
2494 			mutex_exit(&sc->sc_mtx);
2495 			return (0);
2496 		}
2497 
2498 		bcopy(&sc->rxon, &sc->rxon_save, sizeof (sc->rxon));
2499 		sc->sc_ostate = ostate;
2500 
2501 		/* XXX Not sure if call and flags are needed. */
2502 		ieee80211_node_table_reset(&ic->ic_scan);
2503 		ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
2504 		sc->sc_flags |= IWN_FLAG_SCANNING_2GHZ;
2505 
2506 		/* Make the link LED blink while we're scanning. */
2507 		iwn_set_led(sc, IWN_LED_LINK, 10, 10);
2508 
2509 		ic->ic_state = nstate;
2510 
2511 		error = iwn_scan(sc, IEEE80211_CHAN_2GHZ);
2512 		if (error != 0) {
2513 			dev_err(sc->sc_dip, CE_WARN,
2514 			    "!could not initiate scan");
2515 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
2516 			mutex_exit(&sc->sc_mtx);
2517 			return (error);
2518 		}
2519 
2520 		mutex_exit(&sc->sc_mtx);
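		/*
		 * Have iwn_abort_scan() stop the scan if it has not
		 * completed after iwn_scan_timeout seconds.
		 */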
2521 		sc->scan_to = timeout(iwn_abort_scan, sc, iwn_scan_timeout *
2522 		    drv_usectohz(MICROSEC));
2523 		return (error);
2524 
2525 	case IEEE80211_S_ASSOC:
2526 		if (ostate != IEEE80211_S_RUN) {
2527 			mutex_exit(&sc->sc_mtx);
2528 			break;
2529 		}
2530 		/* FALLTHROUGH */
2531 	case IEEE80211_S_AUTH:
2532 		/* Reset state to handle reassociations correctly. */
2533 		sc->rxon.associd = 0;
2534 		sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2535 		sc->calib.state = IWN_CALIB_STATE_INIT;
2536 
2537 		if ((error = iwn_auth(sc)) != 0) {
2538 			mutex_exit(&sc->sc_mtx);
2539 			dev_err(sc->sc_dip, CE_WARN,
2540 			    "!could not move to auth state");
2541 			return error;
2542 		}
2543 		mutex_exit(&sc->sc_mtx);
2544 		break;
2545 
2546 	case IEEE80211_S_RUN:
2547 		if ((error = iwn_run(sc)) != 0) {
2548 			mutex_exit(&sc->sc_mtx);
2549 			dev_err(sc->sc_dip, CE_WARN,
2550 			    "!could not move to run state");
2551 			return error;
2552 		}
2553 		mutex_exit(&sc->sc_mtx);
2554 		break;
2555 
2556 	case IEEE80211_S_INIT:
2557 		sc->sc_flags &= ~IWN_FLAG_SCANNING;
2558 		sc->calib.state = IWN_CALIB_STATE_INIT;
2559 
2560 		/*
2561 		 * set LED off after init
2562 		 */
2563 		iwn_set_led(sc, IWN_LED_LINK, 1, 0);
2564 
2565 		cv_signal(&sc->sc_scan_cv);
2566 		mutex_exit(&sc->sc_mtx);
2567 		if (sc->scan_to != 0)
2568 			(void) untimeout(sc->scan_to);
2569 		sc->scan_to = 0;
2570 		break;
2571 	}
2572 
2573 	error = sc->sc_newstate(ic, nstate, arg);
2574 
2575 	if (nstate == IEEE80211_S_RUN)
2576 		ieee80211_start_watchdog(ic, 1);
2577 
2578 	return (error);
2579 }
2580 
2581 static void
2582 iwn_iter_func(void *arg, struct ieee80211_node *ni)
2583 {
2584 	struct iwn_softc *sc = arg;
2585 	struct iwn_node *wn = (struct iwn_node *)ni;
2586 
2587 	ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
2588 }
2589 
2590 static void
2591 iwn_calib_timeout(void *arg)
2592 {
2593 	struct iwn_softc *sc = arg;
2594 	struct ieee80211com *ic = &sc->sc_ic;
2595 
2596 	mutex_enter(&sc->sc_mtx);
2597 
2598 	if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
2599 		if (ic->ic_opmode == IEEE80211_M_STA)
2600 			iwn_iter_func(sc, ic->ic_bss);
2601 		else
2602 			ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc);
2603 	}
2604 	/* Force automatic TX power calibration every 60 secs (120 x 500ms). */
2605 	if (++sc->calib_cnt >= 120) {
2606 		uint32_t flags = 0;
2607 
2608 		DTRACE_PROBE(get__statistics);
2609 		(void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2610 		    sizeof flags, 1);
2611 		sc->calib_cnt = 0;
2612 	}
2613 
2614 	/* Automatic rate control triggered every 500ms. */
2615 	if ((sc->sc_flags & IWN_FLAG_STOP_CALIB_TO) == 0)
2616 		sc->calib_to = timeout(iwn_calib_timeout, sc,
2617 		    drv_usectohz(500000));
2618 
2619 	mutex_exit(&sc->sc_mtx);
2620 }
2621 
2622 /*
2623  * Process an RX_PHY firmware notification.  This is usually immediately
2624  * followed by an MPDU_RX_DONE notification.
2625  */
2626 static void
2627 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2628     struct iwn_rx_data *data)
2629 {
2630 	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2631 
2632 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2633 	    sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2634 
2635 	DTRACE_PROBE1(rx__phy, struct iwn_rx_stat *, stat);
2636 
2637 	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
2638 	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2639 	sc->last_rx_valid = 1;
2640 }
2641 
2642 /*
2643  * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2644  * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2645  */
2646 static void
2647 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2648     struct iwn_rx_data *data)
2649 {
2650 	struct iwn_ops *ops = &sc->ops;
2651 	struct ieee80211com *ic = &sc->sc_ic;
2652 	struct iwn_rx_ring *ring = &sc->rxq;
2653 	struct ieee80211_frame *wh;
2654 	struct ieee80211_node *ni;
2655 	mblk_t *m;
2656 	struct iwn_rx_stat *stat;
2657 	char	*head;
2658 	uint32_t flags;
2659 	int len, rssi;
2660 
2661 	if (desc->type == IWN_MPDU_RX_DONE) {
2662 		/* Check for prior RX_PHY notification. */
2663 		if (!sc->last_rx_valid) {
2664 			dev_err(sc->sc_dip, CE_WARN,
2665 			    "!missing RX_PHY");
2666 			return;
2667 		}
2668 		sc->last_rx_valid = 0;
2669 		stat = &sc->last_rx_stat;
2670 	} else
2671 		stat = (struct iwn_rx_stat *)(desc + 1);
2672 
2673 	(void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0,
2674 	    DDI_DMA_SYNC_FORKERNEL);
2675 
2676 	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2677 		dev_err(sc->sc_dip, CE_WARN,
2678 		    "!invalid RX statistic header");
2679 		return;
2680 	}
2681 	if (desc->type == IWN_MPDU_RX_DONE) {
2682 		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2683 		head = (char *)(mpdu + 1);
2684 		len = le16toh(mpdu->len);
2685 	} else {
2686 		head = (char *)(stat + 1) + stat->cfg_phy_len;
2687 		len = le16toh(stat->len);
2688 	}
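	/* The 32-bit RX status flags immediately follow the frame data. */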
2689 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
2690 	flags = le32toh(*(uint32_t *)(head + len));
2691 
2692 	/* Discard frames with a bad FCS early. */
2693 	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2694 		sc->sc_rx_err++;
2695 		ic->ic_stats.is_fcs_errors++;
2696 		return;
2697 	}
2698 	/* Discard frames that are too short. */
2699 	if (len < sizeof (*wh)) {
2700 		sc->sc_rx_err++;
2701 		return;
2702 	}
2703 
2704 	m = allocb(len, BPRI_MED);
2705 	if (m == NULL) {
2706 		sc->sc_rx_nobuf++;
2707 		return;
2708 	}
2709 
2710 	/* Update RX descriptor. */
2711 	ring->desc[ring->cur] =
2712 	    htole32(data->dma_data.paddr >> 8);
2713 	(void) ddi_dma_sync(ring->desc_dma.dma_hdl,
2714 	    ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2715 	    DDI_DMA_SYNC_FORDEV);
2716 
2717 	/* Grab a reference to the source node. */
2718 	wh = (struct ieee80211_frame *)head;
2719 	ni = ieee80211_find_rxnode(ic, wh);
2720 
2721 	/* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */
2722 	/* NetBSD does decryption in ieee80211_input. */
2723 
2724 	rssi = ops->get_rssi(stat);
2725 
2726 	/*
2727 	 * Convert dBm to a percentage (1-100), quadratic approximation.
2728 	 */
2729 	rssi = (100 * 75 * 75 - (-20 - rssi) * (15 * 75 + 62 * (-20 - rssi)))
2730 	    / (75 * 75);
2731 	if (rssi > 100)
2732 		rssi = 100;
2733 	else if (rssi < 1)
2734 		rssi = 1;
2735 
2736 	bcopy(wh, m->b_wptr, len);
2737 	m->b_wptr += len;
2738 
2739 	/* XXX Added for NetBSD: scans never stop without it */
2740 	if (ic->ic_state == IEEE80211_S_SCAN)
2741 		iwn_fix_channel(sc, m, stat);
2742 
2743 	/* Send the frame to the 802.11 layer. */
2744 	ieee80211_input(ic, m, ni, rssi, 0);
2745 
2746 	/* Node is no longer needed. */
2747 	ieee80211_free_node(ni);
2748 }
2749 
2750 #ifndef IEEE80211_NO_HT
2751 /* Process an incoming Compressed BlockAck. */
2752 static void
2753 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2754     struct iwn_rx_data *data)
2755 {
2756 	struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2757 	struct iwn_tx_ring *txq;
2758 
2759 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2760 	    sizeof (*ba), DDI_DMA_SYNC_FORKERNEL);
2761 
2762 	txq = &sc->txq[le16toh(ba->qid)];
2763 	/* XXX TBD */
2764 }
2765 #endif
2766 
2767 /*
2768  * Process a CALIBRATION_RESULT notification sent by the initialization
2769  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2770  */
2771 static void
2772 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2773     struct iwn_rx_data *data)
2774 {
2775 	struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2776 	int len, idx = -1;
2777 
2778 	/* Runtime firmware should not send such a notification. */
2779 	if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2780 		return;
2781 
2782 	len = (le32toh(desc->len) & 0x3fff) - 4;
2783 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc), len,
2784 	    DDI_DMA_SYNC_FORKERNEL);
2785 
2786 	switch (calib->code) {
2787 	case IWN5000_PHY_CALIB_DC:
2788 		if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2789 		    sc->hw_type == IWN_HW_REV_TYPE_2030 ||
2790 		    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
2791 		    sc->hw_type == IWN_HW_REV_TYPE_135  ||
2792 		    sc->hw_type == IWN_HW_REV_TYPE_105)
2793 			idx = 0;
2794 		break;
2795 	case IWN5000_PHY_CALIB_LO:
2796 		idx = 1;
2797 		break;
2798 	case IWN5000_PHY_CALIB_TX_IQ:
2799 		idx = 2;
2800 		break;
2801 	case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2802 		if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2803 		    sc->hw_type != IWN_HW_REV_TYPE_5150)
2804 			idx = 3;
2805 		break;
2806 	case IWN5000_PHY_CALIB_BASE_BAND:
2807 		idx = 4;
2808 		break;
2809 	}
2810 	if (idx == -1)	/* Ignore other results. */
2811 		return;
2812 
2813 	/* Save calibration result. */
2814 	if (sc->calibcmd[idx].buf != NULL)
2815 		kmem_free(sc->calibcmd[idx].buf, sc->calibcmd[idx].len);
2816 	sc->calibcmd[idx].buf = kmem_zalloc(len, KM_NOSLEEP);
2817 	if (sc->calibcmd[idx].buf == NULL) {
2818 		return;
2819 	}
2820 	sc->calibcmd[idx].len = len;
2821 	memcpy(sc->calibcmd[idx].buf, calib, len);
2822 }
2823 
2824 /*
2825  * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2826  * The latter is sent by the firmware after each received beacon.
2827  */
2828 static void
2829 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2830     struct iwn_rx_data *data)
2831 {
2832 	struct iwn_ops *ops = &sc->ops;
2833 	struct ieee80211com *ic = &sc->sc_ic;
2834 	struct iwn_calib_state *calib = &sc->calib;
2835 	struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2836 	int temp = 0;
2837 
2838 	/* Ignore statistics received during a scan. */
2839 	if (ic->ic_state != IEEE80211_S_RUN)
2840 		return;
2841 
2842 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2843 	    sizeof (*stats), DDI_DMA_SYNC_FORKERNEL);
2844 
2845 	sc->calib_cnt = 0;	/* Reset TX power calibration timeout. */
2846 
2847 	/* Test if temperature has changed. */
2848 	if (stats->general.temp != sc->rawtemp) {
2849 		/* Convert "raw" temperature to degC. */
2850 		sc->rawtemp = stats->general.temp;
2851 		temp = ops->get_temperature(sc);
2852 		sc->sc_misc->temp.value.ul = temp;
2853 
2854 		/* Update TX power if need be (4965AGN only). */
2855 		if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2856 			iwn4965_power_calibration(sc, temp);
2857 	}
2858 
2859 	DTRACE_PROBE2(rx__statistics, struct iwn_stats *, stats, int, temp);
2860 
2861 	if (desc->type != IWN_BEACON_STATISTICS)
2862 		return;	/* Reply to a statistics request. */
2863 
2864 	sc->noise = iwn_get_noise(&stats->rx.general);
2865 	sc->sc_misc->noise.value.l = sc->noise;
2866 
2867 	/* Test that RSSI and noise are present in stats report. */
2868 	if (le32toh(stats->rx.general.flags) != 1) {
2869 		return;
2870 	}
2871 
2872 	/*
2873 	 * XXX Differential gain calibration makes the 6005 firmware
2874 	 * crap out, so skip it for now.  This effectively disables
2875 	 * sensitivity tuning as well.
2876 	 */
2877 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2878 		return;
2879 
2880 	if (calib->state == IWN_CALIB_STATE_ASSOC)
2881 		iwn_collect_noise(sc, &stats->rx.general);
2882 	else if (calib->state == IWN_CALIB_STATE_RUN)
2883 		iwn_tune_sensitivity(sc, &stats->rx);
2884 }
2885 
2886 /*
2887  * Process a TX_DONE firmware notification.  Unfortunately, the 4965AGN
2888  * and 5000 adapters use different, incompatible TX status formats.
2889  */
2890 static void
2891 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2892     struct iwn_rx_data *data)
2893 {
2894 	struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2895 
2896 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2897 	    sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2898 	iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2899 }
2900 
2901 static void
2902 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2903     struct iwn_rx_data *data)
2904 {
2905 	struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2906 
2907 #ifdef notyet
2908 	/* Reset TX scheduler slot. */
2909 	iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2910 #endif
2911 
2912 	(void) ddi_dma_sync(data->dma_data.dma_hdl, sizeof (*desc),
2913 	    sizeof (*stat), DDI_DMA_SYNC_FORKERNEL);
2914 	iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2915 }
2916 
2917 /*
2918  * Adapter-independent backend for TX_DONE firmware notifications.
2919  */
2920 static void
2921 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2922     uint8_t status)
2923 {
2924 	struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2925 	struct iwn_tx_data *data = &ring->data[desc->idx];
2926 	struct iwn_node *wn = (struct iwn_node *)data->ni;
2927 
2928 	/* Update rate control statistics. */
2929 	wn->amn.amn_txcnt++;
2930 	if (ackfailcnt > 0)
2931 		wn->amn.amn_retrycnt++;
2932 
2933 	if (status != 1 && status != 2)
2934 		sc->sc_tx_err++;
2935 	else
2936 		sc->sc_ic.ic_stats.is_tx_frags++;
2937 
2938 	ieee80211_free_node(data->ni);
2939 	data->ni = NULL;
2940 
2941 	mutex_enter(&sc->sc_tx_mtx);
2942 	sc->sc_tx_timer = 0;
2943 	if (--ring->queued < IWN_TX_RING_LOMARK) {
2944 		sc->qfullmsk &= ~(1 << ring->qid);
2945 	}
2946 	mac_tx_update(sc->sc_ic.ic_mach);
2947 	mutex_exit(&sc->sc_tx_mtx);
2948 }
2949 
2950 /*
2951  * Process a "command done" firmware notification.  This is where we wake up
2952  * processes waiting for a synchronous command completion.
2953  */
2954 static void
2955 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2956 {
2957 	struct iwn_tx_ring *ring = &sc->txq[IWN_CMD_QUEUE_NUM];
2958 	struct iwn_tx_data *data;
2959 
2960 	if ((desc->qid & 0xf) != IWN_CMD_QUEUE_NUM)
2961 		return;	/* Not a command ack. */
2962 
2963 	data = &ring->data[desc->idx];
2964 
2965 	(void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
2966 
2967 	/* If the command was mapped in an extra buffer, free it. */
2968 	if (data->cmd_dma.dma_hdl) {
2969 		(void) ddi_dma_sync(data->cmd_dma.dma_hdl, 0, 0,
2970 		    DDI_DMA_SYNC_FORDEV);
2971 		iwn_dma_contig_free(&data->cmd_dma);
2972 	}
2973 
2974 	mutex_enter(&sc->sc_mtx);
2975 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2976 	cv_signal(&sc->sc_cmd_cv);
2977 	mutex_exit(&sc->sc_mtx);
2978 }
2979 
2980 /*
2981  * Process an INT_FH_RX or INT_SW_RX interrupt.
2982  */
2983 static void
2984 iwn_notif_intr(struct iwn_softc *sc)
2985 {
2986 	struct iwn_ops *ops = &sc->ops;
2987 	struct ieee80211com *ic = &sc->sc_ic;
2988 	uint16_t hw;
2989 
2990 	ASSERT(sc != NULL);
2991 
2992 	(void) ddi_dma_sync(sc->rxq.stat_dma.dma_hdl, 0, 0,
2993 	    DDI_DMA_SYNC_FORKERNEL);
2994 
2995 	hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2996 	while (sc->rxq.cur != hw) {
2997 		struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2998 		struct iwn_rx_desc *desc;
2999 
3000 		(void) ddi_dma_sync(data->dma_data.dma_hdl, 0, sizeof (*desc),
3001 		    DDI_DMA_SYNC_FORKERNEL);
3002 		desc = (struct iwn_rx_desc *)data->dma_data.vaddr;
3003 
3004 		DTRACE_PROBE1(notification__intr, struct iwn_rx_desc *, desc);
3005 
3006 		if (!(desc->qid & 0x80))	/* Reply to a command. */
3007 			iwn_cmd_done(sc, desc);
3008 
3009 		switch (desc->type) {
3010 		case IWN_RX_PHY:
3011 			iwn_rx_phy(sc, desc, data);
3012 			break;
3013 
3014 		case IWN_RX_DONE:		/* 4965AGN only. */
3015 		case IWN_MPDU_RX_DONE:
3016 			/* An 802.11 frame has been received. */
3017 			iwn_rx_done(sc, desc, data);
3018 			break;
3019 #ifndef IEEE80211_NO_HT
3020 		case IWN_RX_COMPRESSED_BA:
3021 			/* A Compressed BlockAck has been received. */
3022 			iwn_rx_compressed_ba(sc, desc, data);
3023 			break;
3024 #endif
3025 		case IWN_TX_DONE:
3026 			/* An 802.11 frame has been transmitted. */
3027 			ops->tx_done(sc, desc, data);
3028 			break;
3029 
3030 		case IWN_RX_STATISTICS:
3031 		case IWN_BEACON_STATISTICS:
3032 			mutex_enter(&sc->sc_mtx);
3033 			iwn_rx_statistics(sc, desc, data);
3034 			mutex_exit(&sc->sc_mtx);
3035 			break;
3036 
3037 		case IWN_BEACON_MISSED:
3038 		{
3039 			struct iwn_beacon_missed *miss =
3040 			    (struct iwn_beacon_missed *)(desc + 1);
3041 
3042 			(void) ddi_dma_sync(data->dma_data.dma_hdl,
3043 			    sizeof (*desc), sizeof (*miss),
3044 			    DDI_DMA_SYNC_FORKERNEL);
3045 			/*
3046 			 * If more than iwn_beacons_missed_disconnect
3047 			 * consecutive beacons are missed, we've probably lost
3048 			 * our connection.
3049 			 * If more than iwn_beacons_missed_sensitivity
3050 			 * consecutive beacons are missed, reinitialize the
3051 			 * sensitivity state machine.
3052 			 */
3053 			DTRACE_PROBE1(beacons__missed,
3054 			    struct iwn_beacon_missed *, miss);
3055 			if (ic->ic_state == IEEE80211_S_RUN) {
3056 				if (le32toh(miss->consecutive)
3057 				    > iwn_beacons_missed_disconnect) {
3058 					dev_err(sc->sc_dip, CE_WARN,
3059 					    "!iwn_notif_intr(): %d consecutive "
3060 					    "beacons missed, disconnecting",
3061 					    le32toh(miss->consecutive));
3062 					ieee80211_new_state(ic,
3063 					    IEEE80211_S_INIT, -1);
3064 				} else if (le32toh(miss->consecutive)
3065 				    > iwn_beacons_missed_sensitivity) {
3066 					mutex_enter(&sc->sc_mtx);
3067 					(void) iwn_init_sensitivity(sc);
3068 					mutex_exit(&sc->sc_mtx);
3069 				}
3070 			}
3071 			break;
3072 		}
3073 		case IWN_UC_READY:
3074 		{
3075 			struct iwn_ucode_info *uc =
3076 			    (struct iwn_ucode_info *)(desc + 1);
3077 
3078 			/* The microcontroller is ready. */
3079 			(void) ddi_dma_sync(data->dma_data.dma_hdl,
3080 			    sizeof (*desc), sizeof (*uc),
3081 			    DDI_DMA_SYNC_FORKERNEL);
3082 			DTRACE_PROBE1(uc__ready, struct iwn_ucode_info *, uc)
3083 
3084 			if (le32toh(uc->valid) != 1) {
3085 				dev_err(sc->sc_dip, CE_WARN,
3086 				    "!microcontroller initialization failed");
3087 				break;
3088 			}
3089 			if (uc->subtype == IWN_UCODE_INIT) {
3090 				/* Save microcontroller report. */
3091 				memcpy(&sc->ucode_info, uc, sizeof (*uc));
3092 			}
3093 			/* Save the address of the error log in SRAM. */
3094 			sc->errptr = le32toh(uc->errptr);
3095 			break;
3096 		}
3097 		case IWN_STATE_CHANGED:
3098 		{
3099 			/*LINTED: E_PTR_BAD_CAST_ALIGN*/
3100 			uint32_t *status = (uint32_t *)(desc + 1);
3101 
3102 			/* Enabled/disabled notification. */
3103 			(void) ddi_dma_sync(data->dma_data.dma_hdl,
3104 			    sizeof (*desc), sizeof (*status),
3105 			    DDI_DMA_SYNC_FORKERNEL);
3106 			DTRACE_PROBE1(state__changed, uint32_t, *status);
3107 
3108 			if (le32toh(*status) & 1) {
3109 				/* The RF kill switch must be turned back on. */
3110 				dev_err(sc->sc_dip, CE_WARN,
3111 				    "!Radio transmitter is off");
3112 				/* Turn the interface down. */
3113 				mutex_enter(&sc->sc_mtx);
3114 				sc->sc_flags |=
3115 				    IWN_FLAG_HW_ERR_RECOVER |
3116 				    IWN_FLAG_RADIO_OFF;
3117 				mutex_exit(&sc->sc_mtx);
3118 				ieee80211_new_state(&sc->sc_ic,
3119 				    IEEE80211_S_INIT, -1);
3120 
3121 				return;	/* No further processing. */
3122 			}
3123 			break;
3124 		}
3125 		case IWN_START_SCAN:
3126 		{
3127 			struct iwn_start_scan *scan =
3128 			    (struct iwn_start_scan *)(desc + 1);
3129 
3130 			(void) ddi_dma_sync(data->dma_data.dma_hdl,
3131 			    sizeof (*desc), sizeof (*scan),
3132 			    DDI_DMA_SYNC_FORKERNEL);
3133 			DTRACE_PROBE2(start__scan, uint8_t, scan->chan,
3134 			    uint32_t, le32toh(scan->status));
3135 
3136 			/* Fix current channel. */
3137 			ic->ic_curchan = ic->ic_bss->in_chan =
3138 			    &ic->ic_sup_channels[scan->chan];
3139 			break;
3140 		}
3141 		case IWN_STOP_SCAN:
3142 		{
3143 			struct iwn_stop_scan *scan =
3144 			    (struct iwn_stop_scan *)(desc + 1);
3145 
3146 			(void) ddi_dma_sync(data->dma_data.dma_hdl,
3147 			    sizeof (*desc), sizeof (*scan),
3148 			    DDI_DMA_SYNC_FORKERNEL);
3149 			DTRACE_PROBE3(stop__scan, uint8_t, scan->chan,
3150 			    uint32_t, le32toh(scan->status),
3151 			    uint8_t, scan->nchan);
3152 
3153 			if (iwn_enable_5ghz != 0 &&
3154 			    (sc->sc_flags & IWN_FLAG_SCANNING_2GHZ) &&
3155 			    (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
3156 				/*
3157 				 * We just finished scanning 2GHz channels,
3158 				 * start scanning 5GHz ones.
3159 				 */
3160 				mutex_enter(&sc->sc_mtx);
3161 				sc->sc_flags |= IWN_FLAG_SCANNING_5GHZ;
3162 				sc->sc_flags &= ~IWN_FLAG_SCANNING_2GHZ;
3163 				if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0) {
3164 					mutex_exit(&sc->sc_mtx);
3165 					break;
3166 				}
3167 				mutex_exit(&sc->sc_mtx);
3168 			}
3169 			ieee80211_end_scan(ic);
3170 			mutex_enter(&sc->sc_mtx);
3171 			sc->sc_flags &= ~IWN_FLAG_SCANNING;
3172 			cv_signal(&sc->sc_scan_cv);
3173 			mutex_exit(&sc->sc_mtx);
3174 			(void) untimeout(sc->scan_to);
3175 			sc->scan_to = 0;
3176 			break;
3177 		}
3178 		case IWN5000_CALIBRATION_RESULT:
3179 			iwn5000_rx_calib_results(sc, desc, data);
3180 			break;
3181 
3182 		case IWN5000_CALIBRATION_DONE:
3183 			mutex_enter(&sc->sc_mtx);
3184 			sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3185 			cv_signal(&sc->sc_calib_cv);
3186 			mutex_exit(&sc->sc_mtx);
3187 			break;
3188 		}
3189 
3190 		sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3191 	}
3192 
3193 	/* Tell the firmware what we have processed. */
3194 	hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
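	/* The write pointer must be a multiple of 8; round it down. */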
3195 	IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3196 }
3197 
3198 /*
3199  * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3200  * from power-down sleep mode.
3201  */
3202 static void
3203 iwn_wakeup_intr(struct iwn_softc *sc)
3204 {
3205 	int qid;
3206 
3207 	DTRACE_PROBE(wakeup__intr);
3208 
3209 	/* Wakeup RX and TX rings. */
3210 	IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3211 	for (qid = 0; qid < sc->ntxqs; qid++) {
3212 		struct iwn_tx_ring *ring = &sc->txq[qid];
3213 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3214 	}
3215 }
3216 
3217 /*
3218  * Dump the firmware error log when a firmware panic occurs.  Although we
3219  * can't debug the firmware because it is neither open source nor free, the
3220  * log can help us identify certain classes of problems.
3221  */
3222 static void
3223 iwn_fatal_intr(struct iwn_softc *sc)
3224 {
3225 	struct iwn_fw_dump dump;
3226 	int i;
3227 
3228 	/* Force a complete recalibration on next init. */
3229 	sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3230 
3231 	/* Check that the error log address is valid. */
3232 	if (sc->errptr < IWN_FW_DATA_BASE ||
3233 	    sc->errptr + sizeof (dump) >
3234 	    IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3235 		dev_err(sc->sc_dip, CE_WARN,
3236 		    "!bad firmware error log address 0x%08x", sc->errptr);
3237 		return;
3238 	}
3239 	if (iwn_nic_lock(sc) != 0) {
3240 		dev_err(sc->sc_dip, CE_WARN,
3241 		    "!could not read firmware error log");
3242 		return;
3243 	}
3244 	/* Read firmware error log from SRAM. */
3245 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
3246 	iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3247 	    sizeof (dump) / sizeof (uint32_t));
3248 	iwn_nic_unlock(sc);
3249 
3250 	if (dump.valid == 0) {
3251 		dev_err(sc->sc_dip, CE_WARN,
3252 		    "!firmware error log is empty");
3253 		return;
3254 	}
3255 	dev_err(sc->sc_dip, CE_WARN, "!firmware error log:");
3256 	dev_err(sc->sc_dip, CE_CONT, "!  error type      = \"%s\" (0x%08X)",
3257 	    (dump.id < __arraycount(iwn_fw_errmsg)) ?
3258 		iwn_fw_errmsg[dump.id] : "UNKNOWN",
3259 	    dump.id);
3260 	dev_err(sc->sc_dip, CE_CONT, "!  program counter = 0x%08X", dump.pc);
3261 	dev_err(sc->sc_dip, CE_CONT, "!  source line     = 0x%08X",
3262 	    dump.src_line);
3263 	dev_err(sc->sc_dip, CE_CONT, "!  error data      = 0x%08X%08X",
3264 	    dump.error_data[0], dump.error_data[1]);
3265 	dev_err(sc->sc_dip, CE_CONT, "!  branch link     = 0x%08X%08X",
3266 	    dump.branch_link[0], dump.branch_link[1]);
3267 	dev_err(sc->sc_dip, CE_CONT, "!  interrupt link  = 0x%08X%08X",
3268 	    dump.interrupt_link[0], dump.interrupt_link[1]);
3269 	dev_err(sc->sc_dip, CE_CONT, "!  time            = %u", dump.time[0]);
3270 
3271 	/* Dump driver status (TX and RX rings) while we're here. */
3272 	dev_err(sc->sc_dip, CE_WARN, "!driver status:");
3273 	for (i = 0; i < sc->ntxqs; i++) {
3274 		struct iwn_tx_ring *ring = &sc->txq[i];
3275 		dev_err(sc->sc_dip, CE_WARN,
3276 		    "!  tx ring %2d: qid=%2d cur=%3d queued=%3d",
3277 		    i, ring->qid, ring->cur, ring->queued);
3278 	}
3279 	dev_err(sc->sc_dip, CE_WARN, "!  rx ring: cur=%d", sc->rxq.cur);
3280 	dev_err(sc->sc_dip, CE_WARN, "!  802.11 state %d", sc->sc_ic.ic_state);
3281 }
3282 
3283 /*ARGSUSED1*/
3284 static uint_t
3285 iwn_intr(caddr_t arg, caddr_t unused)
3286 {
3287 	_NOTE(ARGUNUSED(unused));
3288 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
3289 	struct iwn_softc *sc = (struct iwn_softc *)arg;
3290 	uint32_t r1, r2, tmp;
3291 
3292 	if (sc == NULL)
3293 		return (DDI_INTR_UNCLAIMED);
3294 
3295 	/* Disable interrupts. */
3296 	IWN_WRITE(sc, IWN_INT_MASK, 0);
3297 
3298 	/* Read interrupts from ICT (fast) or from registers (slow). */
3299 	if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3300 		(void) ddi_dma_sync(sc->ict_dma.dma_hdl, 0, 0,
3301 		    DDI_DMA_SYNC_FORKERNEL);
3302 		tmp = 0;
3303 		while (sc->ict[sc->ict_cur] != 0) {
3304 			tmp |= sc->ict[sc->ict_cur];
3305 			sc->ict[sc->ict_cur] = 0;	/* Acknowledge. */
3306 			sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3307 		}
3308 		(void) ddi_dma_sync(sc->ict_dma.dma_hdl, 0, 0,
3309 		    DDI_DMA_SYNC_FORDEV);
3310 		tmp = le32toh(tmp);
3311 		if (tmp == 0xffffffff)	/* Shouldn't happen. */
3312 			tmp = 0;
3313 		else if (tmp & 0xc0000)	/* Work around a HW bug. */
3314 			tmp |= 0x8000;
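		/*
		 * Expand the compressed ICT value back into the IWN_INT
		 * register layout: the low byte stays in bits 0-7 and the
		 * second byte moves up into bits 24-31.
		 */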
3315 		r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3316 		r2 = 0;	/* Unused. */
3317 	} else {
3318 		r1 = IWN_READ(sc, IWN_INT);
3319 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3320 			return (DDI_INTR_UNCLAIMED);	/* Hardware gone! */
3321 		r2 = IWN_READ(sc, IWN_FH_INT);
3322 	}
3323 	if (r1 == 0 && r2 == 0) {
3324 		IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3325 		return (DDI_INTR_UNCLAIMED);	/* Interrupt not for us. */
3326 	}
3327 
3328 	/* Acknowledge interrupts. */
3329 	IWN_WRITE(sc, IWN_INT, r1);
3330 	if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3331 		IWN_WRITE(sc, IWN_FH_INT, r2);
3332 
3333 	if (r1 & IWN_INT_RF_TOGGLED) {
3334 		tmp = IWN_READ(sc, IWN_GP_CNTRL);
3335 		dev_err(sc->sc_dip, CE_NOTE,
3336 		    "!RF switch: radio %s",
3337 		    (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3338 	}
3339 	if (r1 & IWN_INT_CT_REACHED) {
3340 		dev_err(sc->sc_dip, CE_WARN,
3341 		    "!critical temperature reached!");
3342 	}
3343 	if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3344 		dev_err(sc->sc_dip, CE_WARN,
3345 		    "!fatal firmware error");
3346 		/* Dump firmware error log and stop. */
3347 		iwn_fatal_intr(sc);
3348 		iwn_hw_stop(sc, B_TRUE);
3349 		if (!IWN_CHK_FAST_RECOVER(sc))
3350 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3351 		mutex_enter(&sc->sc_mtx);
3352 		sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
3353 		mutex_exit(&sc->sc_mtx);
3354 
3355 		return (DDI_INTR_CLAIMED);
3356 	}
3357 	if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3358 	    (r2 & IWN_FH_INT_RX)) {
3359 		if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3360 			int ena = (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX));
3361 
3362 			if (ena)
3363 				IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3364 			IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3365 			    IWN_INT_PERIODIC_DIS);
3366 			iwn_notif_intr(sc);
3367 			if (ena)
3368 				IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3369 				    IWN_INT_PERIODIC_ENA);
3370 		} else {
3371 			iwn_notif_intr(sc);
3372 		}
3373 	}
3374 
3375 	if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3376 		if (sc->sc_flags & IWN_FLAG_USE_ICT)
3377 			IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3378 		mutex_enter(&sc->sc_mtx);
3379 		sc->sc_flags |= IWN_FLAG_FW_DMA;
3380 		cv_signal(&sc->sc_fhdma_cv);
3381 		mutex_exit(&sc->sc_mtx);
3382 	}
3383 
3384 	if (r1 & IWN_INT_ALIVE) {
3385 		mutex_enter(&sc->sc_mtx);
3386 		sc->sc_flags |= IWN_FLAG_FW_ALIVE;
3387 		cv_signal(&sc->sc_alive_cv);
3388 		mutex_exit(&sc->sc_mtx);
3389 	}
3390 
3391 	if (r1 & IWN_INT_WAKEUP)
3392 		iwn_wakeup_intr(sc);
3393 
3394 	/* Re-enable interrupts. */
3395 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3396 	return (DDI_INTR_CLAIMED);
3397 }
3398 
3399 /*
3400  * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3401  * 5000 adapters use a slightly different format).
3402  */
3403 static void
3404 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3405     uint16_t len)
3406 {
3407 	_NOTE(ARGUNUSED(id));
3408 	int w_idx = qid * IWN4965_SCHED_COUNT + idx;
3409 	uint16_t *w = &sc->sched[w_idx];
3410 
3411 	*w = htole16(len + 8);
3412 	(void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3413 	    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
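	/*
	 * Entries in the first IWN_SCHED_WINSZ slots are also mirrored
	 * just past the end of the ring, presumably so the scheduler can
	 * read a full window of byte counts without wrapping.
	 */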
3414 	if (idx < IWN_SCHED_WINSZ) {
3415 		*(w + IWN_TX_RING_COUNT) = *w;
3416 		(void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3417 		    (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3418 		    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3419 	}
3420 }
3421 
3422 static void
3423 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3424     uint16_t len)
3425 {
3426 	int w_idx = qid * IWN5000_SCHED_COUNT + idx;
3427 	uint16_t *w = &sc->sched[w_idx];
3428 
3429 	*w = htole16(id << 12 | (len + 8));
3430 	(void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3431 	    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3432 	if (idx < IWN_SCHED_WINSZ) {
3433 		*(w + IWN_TX_RING_COUNT) = *w;
3434 		(void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3435 		    (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3436 		    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3437 	}
3438 }
3439 
3440 #ifdef notyet
3441 static void
3442 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3443 {
3444 	int w_idx = qid * IWN5000_SCHED_COUNT + idx;
3445 	uint16_t *w = &sc->sched[w_idx];
3446 
3447 	*w = (*w & htole16(0xf000)) | htole16(1);
3448 	(void) ddi_dma_sync(sc->sched_dma.dma_hdl, w_idx * sizeof (uint16_t),
3449 	    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3450 	if (idx < IWN_SCHED_WINSZ) {
3451 		*(w + IWN_TX_RING_COUNT) = *w;
3452 		(void) ddi_dma_sync(sc->sched_dma.dma_hdl,
3453 		    (w_idx + IWN_TX_RING_COUNT) * sizeof (uint16_t),
3454 		    sizeof (uint16_t), DDI_DMA_SYNC_FORDEV);
3455 	}
3456 }
3457 #endif
3458 
3459 /*
3460  * This function exists only for compatibility with the net80211 module.
3461  * iwn_qosparam_to_hw() is the function that actually updates the EDCA
3462  * parameters in hardware.
3463  */
3464 static int
3465 iwn_wme_update(struct ieee80211com *ic)
3466 {
3467 	_NOTE(ARGUNUSED(ic));
3468 	return (0);
3469 }
3470 
3471 static int
3472 iwn_wme_to_qos_ac(struct iwn_softc *sc, int wme_ac)
3473 {
3474 	int qos_ac;
3475 
3476 	switch (wme_ac) {
3477 	case WME_AC_BE:
3478 		qos_ac = QOS_AC_BK;
3479 		break;
3480 	case WME_AC_BK:
3481 		qos_ac = QOS_AC_BE;
3482 		break;
3483 	case WME_AC_VI:
3484 		qos_ac = QOS_AC_VI;
3485 		break;
3486 	case WME_AC_VO:
3487 		qos_ac = QOS_AC_VO;
3488 		break;
3489 	default:
3490 		dev_err(sc->sc_dip, CE_WARN, "!iwn_wme_to_qos_ac(): "
3491 		    "WME AC index is not in suitable range.\n");
3492 		qos_ac = QOS_AC_INVALID;
3493 		break;
3494 	}
3495 
3496 	return (qos_ac);
3497 }
3498 
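/*
 * Convert a contention window exponent (CWmin/CWmax as carried in the WME
 * parameters) to the actual contention window value: cw = 2^cw_e - 1.
 */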
3499 static uint16_t
3500 iwn_cw_e_to_cw(uint8_t cw_e)
3501 {
3502 	uint16_t cw = 1;
3503 
3504 	while (cw_e > 0) {
3505 		cw <<= 1;
3506 		cw_e--;
3507 	}
3508 
3509 	cw -= 1;
3510 	return (cw);
3511 }
3512 
3513 static int
3514 iwn_wmeparam_check(struct iwn_softc *sc, struct wmeParams *wmeparam)
3515 {
3516 	int i;
3517 
3518 	for (i = 0; i < WME_NUM_AC; i++) {
3519 
3520 		if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
3521 		    (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
3522 			dev_err(sc->sc_dip, CE_WARN, "!iwn_wmeparam_check(): "
3523 			    "Contention window is not in suitable range.\n");
3524 			return (IWN_FAIL);
3525 		}
3526 
3527 		if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
3528 		    (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
3529 			dev_err(sc->sc_dip, CE_WARN, "!iwn_wmeparam_check(): "
3530 			    "Arbitration interframe space number "
3531 			    "is not in suitable range.\n");
3532 			return (IWN_FAIL);
3533 		}
3534 	}
3535 
3536 	return (IWN_SUCCESS);
3537 }
3538 
3539 /*
3540  * This function updates the EDCA parameters in hardware.
3541  * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
3542  */
3543 static int
3544 iwn_qosparam_to_hw(struct iwn_softc *sc, int async)
3545 {
3546 	ieee80211com_t *ic = &sc->sc_ic;
3547 	ieee80211_node_t *in = ic->ic_bss;
3548 	struct wmeParams *wmeparam;
3549 	struct iwn_edca_params edcaparam;
3550 	int i, j;
3551 	int err = IWN_FAIL;
3552 
3553 	if ((in->in_flags & IEEE80211_NODE_QOS) &&
3554 	    (IEEE80211_M_STA == ic->ic_opmode)) {
3555 		wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
3556 	} else {
3557 		return (IWN_SUCCESS);
3558 	}
3559 
3560 	(void) memset(&edcaparam, 0, sizeof (edcaparam));
3561 
3562 	err = iwn_wmeparam_check(sc, wmeparam);
3563 	if (err != IWN_SUCCESS) {
3564 		return (err);
3565 	}
3566 
3567 	if (in->in_flags & IEEE80211_NODE_QOS) {
3568 		edcaparam.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
3569 	}
3570 
3571 	if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
3572 		edcaparam.flags |= QOS_PARAM_FLG_TGN;
3573 	}
3574 
3575 	for (i = 0; i < WME_NUM_AC; i++) {
3576 
3577 		j = iwn_wme_to_qos_ac(sc, i);
3578 		if (j < QOS_AC_BK || j > QOS_AC_VO) {
3579 			return (IWN_FAIL);
3580 		}
3581 
3582 		sc->sc_edca->ac[j].cwmin.value.ul = edcaparam.ac[j].cwmin =
3583 		    iwn_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
3584 		sc->sc_edca->ac[j].cwmax.value.ul = edcaparam.ac[j].cwmax =
3585 		    iwn_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
3586 		sc->sc_edca->ac[j].aifsn.value.ul = edcaparam.ac[j].aifsn =
3587 		    wmeparam[i].wmep_aifsn;
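		/*
		 * wmep_txopLimit is in units of 32us; the EDCA command
		 * apparently expects the TXOP limit in microseconds.
		 */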
3588 		sc->sc_edca->ac[j].txop.value.ul = edcaparam.ac[j].txoplimit =
3589 		    (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
3590 	}
3591 
3592 	err = iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &edcaparam,
3593 	    sizeof (edcaparam), async);
3594 	if (err != IWN_SUCCESS) {
3595 		dev_err(sc->sc_dip, CE_WARN, "!iwn_qosparam_to_hw(): "
3596 		    "failed to update QoS parameters into hardware.");
3597 		return (err);
3598 	}
3599 
3600 	return (err);
3601 }
3602 
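/*
 * Map a traffic ID (user priority) to an access category, following the
 * standard 802.11e UP-to-AC mapping.
 */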
3603 static inline int
3604 iwn_wme_tid_qos_ac(int tid)
3605 {
3606 	switch (tid) {
3607 	case 1:
3608 	case 2:
3609 		return (QOS_AC_BK);
3610 	case 0:
3611 	case 3:
3612 		return (QOS_AC_BE);
3613 	case 4:
3614 	case 5:
3615 		return (QOS_AC_VI);
3616 	case 6:
3617 	case 7:
3618 		return (QOS_AC_VO);
3619 	}
3620 
3621 	return (QOS_AC_BE);
3622 }
3623 
3624 static inline int
3625 iwn_qos_ac_to_txq(int qos_ac)
3626 {
3627 	switch (qos_ac) {
3628 	case QOS_AC_BK:
3629 		return (QOS_AC_BK_TO_TXQ);
3630 	case QOS_AC_BE:
3631 		return (QOS_AC_BE_TO_TXQ);
3632 	case QOS_AC_VI:
3633 		return (QOS_AC_VI_TO_TXQ);
3634 	case QOS_AC_VO:
3635 		return (QOS_AC_VO_TO_TXQ);
3636 	}
3637 
3638 	return (QOS_AC_BE_TO_TXQ);
3639 }
3640 
3641 static int
3642 iwn_wme_tid_to_txq(struct iwn_softc *sc, int tid)
3643 {
3644 	int queue_n = TXQ_FOR_AC_INVALID;
3645 	int qos_ac;
3646 
3647 	if (tid < WME_TID_MIN ||
3648 	    tid > WME_TID_MAX) {
3649 		dev_err(sc->sc_dip, CE_WARN, "!iwn_wme_tid_to_txq(): "
3650 		    "TID is not in suitable range.");
3651 		return (queue_n);
3652 	}
3653 
3654 	qos_ac = iwn_wme_tid_qos_ac(tid);
3655 	queue_n = iwn_qos_ac_to_txq(qos_ac);
3656 
3657 	return (queue_n);
3658 }
3659 
3660 static int
3661 iwn_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3662 {
3663 	struct iwn_softc *sc = (struct iwn_softc *)ic;
3664 	struct iwn_node *wn;
3665 	struct iwn_tx_ring *ring;
3666 	struct iwn_tx_desc *desc;
3667 	struct iwn_tx_data *data;
3668 	struct iwn_tx_cmd *cmd;
3669 	struct iwn_cmd_data *tx;
3670 	ieee80211_node_t *in;
3671 	const struct iwn_rate *rinfo;
3672 	struct ieee80211_frame *wh;
3673 	struct ieee80211_key *k = NULL;
3674 	uint32_t flags;
3675 	uint_t hdrlen;
3676 	uint8_t ridx, txant;
3677 	int i, totlen, seglen, pad;
3678 	int txq_id = NON_QOS_TXQ;
3679 	struct ieee80211_qosframe *qwh = NULL;
3680 	uint8_t tid = WME_TID_INVALID;
3681 	ddi_dma_cookie_t cookie;
3682 	mblk_t *m0, *m;
3683 	int mblen, off;
3684 
3685 	int noack = 0;
3686 
3687 	if (ic == NULL)
3688 		return (EIO);
3689 
3690 	if ((mp == NULL) || (MBLKL(mp) <= 0))
3691 		return (EIO);
3692 
3693 	if (sc->sc_flags & IWN_FLAG_SUSPEND) {
3694 		freemsg(mp);
3695 		sc->sc_tx_err++;
3696 		return (EIO);
3697 	}
3698 
3699 	wh = (struct ieee80211_frame *)mp->b_rptr;
3700 
3701 	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3702 
3703 	/*
3704 	 * Determine which AP or IBSS station this frame is destined for.
3705 	 */
3706 	in = ieee80211_find_txnode(ic, wh->i_addr1);
3707 	if (in == NULL) {
3708 		dev_err(sc->sc_dip, CE_WARN, "!iwn_send(): "
3709 		    "failed to find tx node");
3710 		freemsg(mp);
3711 		sc->sc_tx_err++;
3712 		return (EIO);
3713 	}
3714 
3715 	wn = (struct iwn_node *)in;
3716 
3717 	/*
3718 	 * Determine TX queue according to traffic ID in frame
3719 	 * if working in QoS mode.
3720 	 */
3721 	if (in->in_flags & IEEE80211_NODE_QOS) {
3722 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
3723 		    IEEE80211_FC0_TYPE_DATA) {
3724 			if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3725 				qwh = (struct ieee80211_qosframe *)wh;
3726 
3727 				tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3728 				txq_id = iwn_wme_tid_to_txq(sc, tid);
3729 
3730 				if (txq_id < TXQ_FOR_AC_MIN ||
3731 				    (txq_id > TXQ_FOR_AC_MAX)) {
3732 					freemsg(mp);
3733 					sc->sc_tx_err++;
3734 					return (EIO);
3735 				}
3736 			} else {
3737 				txq_id = NON_QOS_TXQ;
3738 			}
3739 		} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3740 		    IEEE80211_FC0_TYPE_MGT) {
3741 			txq_id = QOS_TXQ_FOR_MGT;
3742 		} else {
3743 			txq_id = NON_QOS_TXQ;
3744 		}
3745 	} else {
3746 		txq_id = NON_QOS_TXQ;
3747 	}
3748 
3749 	if (sc->qfullmsk & (1 << txq_id)) {
3750 		sc->sc_tx_err++;
3751 		/* net80211-initiated send */
3752 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
3753 		    IEEE80211_FC0_TYPE_DATA)
3754 			freemsg(mp);
3755 		return (EAGAIN);
3756 	}
3757 
3758 	/* Choose a TX rate index. */
3759 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3760 	    type != IEEE80211_FC0_TYPE_DATA) {
3761 		ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3762 		    IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
3763 	} else if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3764 		ridx = sc->fixed_ridx;
3765 	} else
3766 		ridx = wn->ridx[in->in_txrate];
3767 	rinfo = &iwn_rates[ridx];
3768 
3769 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
3770 	if (m) {
3771 		for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3772 			mblen = MBLKL(m0);
3773 			bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3774 			off += mblen;
3775 		}
3776 
3777 		m->b_wptr += off;
3778 
3779 		freemsg(mp);
3780 		mp = m;
3781 
3782 		wh = (struct ieee80211_frame *)mp->b_rptr;
3783 	} else {
3784 		dev_err(sc->sc_dip, CE_WARN, "!iwn_send(): can't copy");
3785 		/* net80211-initiated send */
3786 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
3787 		    IEEE80211_FC0_TYPE_DATA)
3788 			freemsg(mp);
3789 		return (EAGAIN);
3790 	}
3791 
3792 
3793 	/*
3794 	 * Let the net80211 module encapsulate outbound data frames and
3795 	 * fill in the remaining 802.11 header fields.
3796 	 */
3797 	if ((type & IEEE80211_FC0_TYPE_MASK) ==
3798 	    IEEE80211_FC0_TYPE_DATA)
3799 		(void) ieee80211_encap(ic, mp, in);
3800 
3801 	/* Encrypt the frame if need be. */
3802 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3803 		k = ieee80211_crypto_encap(ic, mp);
3804 		if (k == NULL) {
3805 			freemsg(mp);
3806 			return (EIO);
3807 		}
3808 		/* Packet header may have moved, reset our local pointer. */
3809 		wh = (struct ieee80211_frame *)mp->b_rptr;
3810 	}
3811 	totlen = msgdsize(mp);
3812 
3813 	mutex_enter(&sc->sc_tx_mtx);
3814 	ring = &sc->txq[txq_id];
3815 	desc = &ring->desc[ring->cur];
3816 	data = &ring->data[ring->cur];
3817 
3818 	/* Prepare TX firmware command. */
3819 	cmd = &ring->cmd[ring->cur];
3820 	cmd->code = IWN_CMD_TX_DATA;
3821 	cmd->flags = 0;
3822 	cmd->qid = ring->qid;
3823 	cmd->idx = ring->cur;
3824 
3825 	tx = (struct iwn_cmd_data *)cmd->data;
3826 	/* NB: No need to clear tx, all fields are reinitialized here. */
3827 	tx->scratch = 0;	/* clear "scratch" area */
3828 
3829 	flags = 0;
3830 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3831 		/* Unicast frame, check if an ACK is expected. */
3832 		if (!noack)
3833 			flags |= IWN_TX_NEED_ACK;
3834 	}
3835 
3836 	if ((wh->i_fc[0] &
3837 	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3838 	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3839 		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */
3840 
3841 	ASSERT((flags & IWN_TX_IMM_BA) == 0);
3842 
3843 	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3844 		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */
3845 
3846 	ASSERT((flags & IWN_TX_MORE_FRAG) == 0);
3847 
3848 	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3849 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3850 		/* NB: Group frames are sent using CCK in 802.11b/g. */
3851 		if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
3852 			flags |= IWN_TX_NEED_RTS;
3853 		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3854 		    ridx >= IWN_RIDX_OFDM6) {
3855 			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3856 				flags |= IWN_TX_NEED_CTS;
3857 			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3858 				flags |= IWN_TX_NEED_RTS;
3859 		}
3860 		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3861 			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3862 				/* 5000 autoselects RTS/CTS or CTS-to-self. */
3863 				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3864 				flags |= IWN_TX_NEED_PROTECTION;
3865 			} else
3866 				flags |= IWN_TX_FULL_TXOP;
3867 		}
3868 	}
3869 
3870 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3871 	    type != IEEE80211_FC0_TYPE_DATA)
3872 		tx->id = sc->broadcast_id;
3873 	else
3874 		tx->id = wn->id;
3875 
3876 	if (type == IEEE80211_FC0_TYPE_MGT) {
3877 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3878 
3879 #ifndef IEEE80211_STA_ONLY
3880 		/* Tell HW to set timestamp in probe responses. */
3881 		/* XXX NetBSD rev 1.11 added probe requests here but */
3882 		/* probe requests do not take timestamps (from Bergamini). */
3883 		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3884 			flags |= IWN_TX_INSERT_TSTAMP;
3885 #endif
3886 		/* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */
3887 		/* changes here. These are not needed (from Bergamini). */
3888 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3889 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3890 			tx->timeout = htole16(3);
3891 		else
3892 			tx->timeout = htole16(2);
3893 	} else
3894 		tx->timeout = htole16(0);
3895 
3896 	if (hdrlen & 3) {
3897 		/* First segment length must be a multiple of 4. */
3898 		flags |= IWN_TX_NEED_PADDING;
3899 		pad = 4 - (hdrlen & 3);
3900 	} else
3901 		pad = 0;
3902 
3903 	if (tid != WME_TID_INVALID) {
3904 		flags &= ~IWN_TX_AUTO_SEQ;
3905 	} else {
3906 		flags |= IWN_TX_AUTO_SEQ;
3907 		tid = 0;
3908 	}
3909 
3910 	tx->len = htole16(totlen);
3911 	tx->tid = tid;
3912 	tx->rts_ntries = 60;
3913 	tx->data_ntries = 15;
3914 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3915 	tx->plcp = rinfo->plcp;
3916 	tx->rflags = rinfo->flags;
3917 	if (tx->id == sc->broadcast_id) {
3918 		/* Group or management frame. */
3919 		tx->linkq = 0;
3920 		/* XXX Alternate between antenna A and B? */
3921 		txant = IWN_LSB(sc->txchainmask);
3922 		tx->rflags |= IWN_RFLAG_ANT(txant);
3923 	} else {
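		/*
		 * The rate retry (link quality) table built by
		 * iwn_set_link_quality() is ordered from the highest rate
		 * down, so convert the net80211 rate index accordingly.
		 */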
3924 		tx->linkq = in->in_rates.ir_nrates - in->in_txrate - 1;
3925 		flags |= IWN_TX_LINKQ;	/* enable MRR */
3926 	}
3927 	/* Set physical address of "scratch area". */
3928 	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3929 	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3930 
3931 	/* Copy 802.11 header in TX command. */
3932 	/* XXX NetBSD changed this in rev 1.20 */
3933 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3934 	mp->b_rptr += hdrlen;
3935 
3936 	bcopy(mp->b_rptr, data->dma_data.vaddr, totlen - hdrlen);
3937 	tx->security = 0;
3938 	tx->flags = htole32(flags);
3939 
3940 	data->ni = in;
3941 
3942 	DTRACE_PROBE4(tx, int, ring->qid, int, ring->cur, size_t, MBLKL(mp),
3943 	    int, data->dma_data.ncookies);
3944 
3945 	/* Fill TX descriptor. */
3946 	desc->nsegs = 1 + data->dma_data.ncookies;
3947 	/* First DMA segment is used by the TX command. */
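	/*
	 * Each segment entry packs the upper 4 bits of the 36-bit DMA
	 * address together with the segment length (shifted left by 4)
	 * into a single 16-bit word.
	 */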
3948 	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3949 	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
3950 	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
3951 
3952 	/* Other DMA segments are for data payload. */
3953 	cookie = data->dma_data.cookie;
3954 	for (i = 1, seglen = totlen - hdrlen;
3955 	     i <= data->dma_data.ncookies;
3956 	     i++, seglen -= cookie.dmac_size) {
3957 		desc->segs[i].addr = htole32(IWN_LOADDR(cookie.dmac_laddress));
3958 		desc->segs[i].len  = htole16(IWN_HIADDR(cookie.dmac_laddress) |
3959 		    seglen << 4);
3960 		if (i < data->dma_data.ncookies)
3961 			ddi_dma_nextcookie(data->dma_data.dma_hdl, &cookie);
3962 	}
3963 
3964 	(void) ddi_dma_sync(data->dma_data.dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
3965 	(void) ddi_dma_sync(ring->cmd_dma.dma_hdl, ring->cur * sizeof (*cmd),
3966 	    sizeof (*cmd), DDI_DMA_SYNC_FORDEV);
3967 	(void) ddi_dma_sync(ring->desc_dma.dma_hdl, ring->cur * sizeof (*desc),
3968 	    sizeof (*desc), DDI_DMA_SYNC_FORDEV);
3969 
3970 	/* Update TX scheduler. */
3971 	sc->ops.update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3972 
3973 	/* Kick TX ring. */
3974 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3975 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3976 
3977 	/* Mark TX ring as full if we reach a certain threshold. */
3978 	if (++ring->queued > IWN_TX_RING_HIMARK)
3979 		sc->qfullmsk |= 1 << ring->qid;
3980 	mutex_exit(&sc->sc_tx_mtx);
3981 	freemsg(mp);
3982 
3983 	ic->ic_stats.is_tx_bytes += totlen;
3984 
3985 	mutex_enter(&sc->sc_mt_mtx);
3986 	if (sc->sc_tx_timer == 0)
3987 		sc->sc_tx_timer = 5;
3988 	mutex_exit(&sc->sc_mt_mtx);
3989 
3990 	return (0);
3991 }
3992 
3993 static mblk_t *
3994 iwn_m_tx(void *arg, mblk_t *mp)
3995 {
3996 	struct iwn_softc *sc;
3997 	ieee80211com_t *ic;
3998 	mblk_t *next;
3999 
4000 	sc = (struct iwn_softc *)arg;
4001 	ASSERT(sc != NULL);
4002 	ic = &sc->sc_ic;
4003 
4004 	if (sc->sc_flags & IWN_FLAG_SUSPEND) {
4005 		freemsgchain(mp);
4006 		return (NULL);
4007 	}
4008 
4009 	if (ic->ic_state != IEEE80211_S_RUN) {
4010 		freemsgchain(mp);
4011 		return (NULL);
4012 	}
4013 
4014 	if ((sc->sc_flags & IWN_FLAG_HW_ERR_RECOVER)) {
4015 		freemsgchain(mp);
4016 		return (NULL);
4017 	}
4018 
4019 	while (mp != NULL) {
4020 		next = mp->b_next;
4021 		mp->b_next = NULL;
4022 		if (iwn_send(ic, mp, IEEE80211_FC0_TYPE_DATA) == EAGAIN) {
4023 			mp->b_next = next;
4024 			break;
4025 		}
4026 		mp = next;
4027 	}
4028 
4029 	return (mp);
4030 }
4031 
4032 static void
4033 iwn_watchdog(void *arg)
4034 {
4035 	struct iwn_softc *sc = (struct iwn_softc *)arg;
4036 	ieee80211com_t *ic = &sc->sc_ic;
4037 	timeout_id_t timeout_id = ic->ic_watchdog_timer;
4038 
4039 	ieee80211_stop_watchdog(ic);
4040 
4041 	mutex_enter(&sc->sc_mt_mtx);
4042 	if (sc->sc_tx_timer > 0) {
4043 		if (--sc->sc_tx_timer == 0) {
4044 			dev_err(sc->sc_dip, CE_WARN, "!device timeout");
4045 			sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
4046 			sc->sc_ostate = IEEE80211_S_RUN;
4047 			DTRACE_PROBE(recover__send__fail);
4048 		}
4049 	}
4050 	mutex_exit(&sc->sc_mt_mtx);
4051 
4052 	if ((ic->ic_state != IEEE80211_S_AUTH) &&
4053 	    (ic->ic_state != IEEE80211_S_ASSOC))
4054 		return;
4055 
4056 	if (ic->ic_bss->in_fails > 10) {
4057 		DTRACE_PROBE2(watchdog__reset, timeout_id_t, timeout_id,
4058 		    struct ieee80211node *, ic->ic_bss);
4059 		dev_err(sc->sc_dip, CE_WARN, "!iwn_watchdog reset");
4060 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4061 	} else {
4062 		ic->ic_bss->in_fails++;
4063 
4064 		DTRACE_PROBE2(watchdog__timeout, timeout_id_t, timeout_id,
4065 		    struct ieee80211node *, ic->ic_bss);
4066 
4067 		ieee80211_watchdog(ic);
4068 	}
4069 }
4070 
4071 static void
4072 iwn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4073 {
4074 	struct iwn_softc *sc;
4075 	struct ieee80211com *ic;
4076 	int  error = 0;
4077 
4078 	sc = (struct iwn_softc *)arg;
4079 	ASSERT(sc != NULL);
4080 	ic = &sc->sc_ic;
4081 
4082 	mutex_enter(&sc->sc_mtx);
4083 	while (sc->sc_flags & IWN_FLAG_SCANNING)
4084 		cv_wait(&sc->sc_scan_cv, &sc->sc_mtx);
4085 	mutex_exit(&sc->sc_mtx);
4086 
4087 	error = ieee80211_ioctl(ic, wq, mp);
4088 	if (error == ENETRESET) {
4089 		/*
4090 		 * This is special-cased for hidden AP connections.
4091 		 * Make sure that only one scan is issued by the driver
4092 		 * for a single 'connect' command.  When connecting to a
4093 		 * hidden AP, the scan is only sent out over the air once
4094 		 * we know the desired ESSID of the AP we want to
4095 		 * connect to.
4096 		 */
4097 		if (ic->ic_des_esslen) {
4098 			if (sc->sc_flags & IWN_FLAG_RUNNING) {
4099 				DTRACE_PROBE(netreset);
4100 				iwn_m_stop(sc);
4101 				(void) iwn_m_start(sc);
4102 				(void) ieee80211_new_state(ic,
4103 				    IEEE80211_S_SCAN, -1);
4104 			}
4105 		}
4106 	}
4107 }
4108 
4109 /*
4110  * Callback functions for get/set property
4111  */
4112 static int
4113 iwn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4114     uint_t wldp_length, void *wldp_buf)
4115 {
4116 	struct iwn_softc *sc;
4117 
4118 	sc = (struct iwn_softc *)arg;
4119 	ASSERT(sc != NULL);
4120 
4121 	return (ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
4122 	    wldp_length, wldp_buf));
4123 }
4124 
4125 static void
4126 iwn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4127     mac_prop_info_handle_t prh)
4128 {
4129 	struct iwn_softc *sc;
4130 
4131 	sc = (struct iwn_softc *)arg;
4132 	ASSERT(sc != NULL);
4133 
4134 	ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
4135 }
4136 
4137 static int
4138 iwn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
4139     uint_t wldp_length, const void *wldp_buf)
4140 {
4141 	struct iwn_softc *sc;
4142 	ieee80211com_t *ic;
4143 	int err = EINVAL;
4144 
4145 	sc = (struct iwn_softc *)arg;
4146 	ASSERT(sc != NULL);
4147 	ic = &sc->sc_ic;
4148 
4149 	mutex_enter(&sc->sc_mtx);
4150 	while (sc->sc_flags & IWN_FLAG_SCANNING)
4151 		cv_wait(&sc->sc_scan_cv, &sc->sc_mtx);
4152 	mutex_exit(&sc->sc_mtx);
4153 
4154 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
4155 	    wldp_buf);
4156 
4157 	if (err == ENETRESET) {
4158 		if (ic->ic_des_esslen) {
4159 			if (sc->sc_flags & IWN_FLAG_RUNNING) {
4160 				DTRACE_PROBE(netreset);
4161 				iwn_m_stop(sc);
4162 				(void) iwn_m_start(sc);
4163 				(void) ieee80211_new_state(ic,
4164 				    IEEE80211_S_SCAN, -1);
4165 			}
4166 		}
4167 		err = 0;
4168 	}
4169 
4170 	return (err);
4171 }
4172 
4173 /*
4174  * invoked by GLD to get statistics from the NIC and driver
4175  */
4176 static int
4177 iwn_m_stat(void *arg, uint_t stat, uint64_t *val)
4178 {
4179 	struct iwn_softc *sc;
4180 	ieee80211com_t *ic;
4181 	ieee80211_node_t *in;
4182 
4183 	sc = (struct iwn_softc *)arg;
4184 	ASSERT(sc != NULL);
4185 	ic = &sc->sc_ic;
4186 
4187 	mutex_enter(&sc->sc_mtx);
4188 
4189 	switch (stat) {
4190 	case MAC_STAT_IFSPEED:
4191 		in = ic->ic_bss;
4192 		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
4193 		    IEEE80211_RATE(in->in_txrate) :
4194 		    ic->ic_fixed_rate) / 2 * 1000000;
4195 		break;
4196 	case MAC_STAT_NOXMTBUF:
4197 		*val = sc->sc_tx_nobuf;
4198 		break;
4199 	case MAC_STAT_NORCVBUF:
4200 		*val = sc->sc_rx_nobuf;
4201 		break;
4202 	case MAC_STAT_IERRORS:
4203 		*val = sc->sc_rx_err;
4204 		break;
4205 	case MAC_STAT_RBYTES:
4206 		*val = ic->ic_stats.is_rx_bytes;
4207 		break;
4208 	case MAC_STAT_IPACKETS:
4209 		*val = ic->ic_stats.is_rx_frags;
4210 		break;
4211 	case MAC_STAT_OBYTES:
4212 		*val = ic->ic_stats.is_tx_bytes;
4213 		break;
4214 	case MAC_STAT_OPACKETS:
4215 		*val = ic->ic_stats.is_tx_frags;
4216 		break;
4217 	case MAC_STAT_OERRORS:
4218 	case WIFI_STAT_TX_FAILED:
4219 		*val = sc->sc_tx_err;
4220 		break;
4221 	case WIFI_STAT_TX_RETRANS:
4222 		*val = sc->sc_tx_retries;
4223 		break;
4224 	case WIFI_STAT_FCS_ERRORS:
4225 	case WIFI_STAT_WEP_ERRORS:
4226 	case WIFI_STAT_TX_FRAGS:
4227 	case WIFI_STAT_MCAST_TX:
4228 	case WIFI_STAT_RTS_SUCCESS:
4229 	case WIFI_STAT_RTS_FAILURE:
4230 	case WIFI_STAT_ACK_FAILURE:
4231 	case WIFI_STAT_RX_FRAGS:
4232 	case WIFI_STAT_MCAST_RX:
4233 	case WIFI_STAT_RX_DUPS:
4234 		mutex_exit(&sc->sc_mtx);
4235 		return (ieee80211_stat(ic, stat, val));
4236 	default:
4237 		mutex_exit(&sc->sc_mtx);
4238 		return (ENOTSUP);
4239 	}
4240 
4241 	mutex_exit(&sc->sc_mtx);
4242 
4243 	return (0);
4245 }
4246 
4247 /*
4248  * invoked by GLD to configure NIC
4249  */
4250 static int
4251 iwn_m_unicst(void *arg, const uint8_t *macaddr)
4252 {
4253 	struct iwn_softc *sc;
4254 	ieee80211com_t	*ic;
4255 	int err = IWN_SUCCESS;
4256 
4257 	sc = (struct iwn_softc *)arg;
4258 	ASSERT(sc != NULL);
4259 	ic = &sc->sc_ic;
4260 
4261 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
4262 		mutex_enter(&sc->sc_mtx);
4263 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
4264 		err = iwn_config(sc);
4265 		mutex_exit(&sc->sc_mtx);
4266 		if (err != IWN_SUCCESS) {
4267 			dev_err(sc->sc_dip, CE_WARN, "!iwn_m_unicst(): "
4268 			    "failed to configure device");
4269 			goto fail;
4270 		}
4271 	}
4272 
4273 	return (err);
4274 
4275 fail:
4276 	return (err);
4277 }
4278 
4279 /*ARGSUSED*/
4280 static int
4281 iwn_m_multicst(void *arg, boolean_t add, const uint8_t *m)
4282 {
4283 	return (IWN_SUCCESS);
4284 }
4285 
4286 /*ARGSUSED*/
4287 static int
4288 iwn_m_promisc(void *arg, boolean_t on)
4289 {
4290 	_NOTE(ARGUNUSED(on));
4291 
4292 	return (IWN_SUCCESS);
4293 }
4294 
4295 static void
4296 iwn_abort_scan(void *arg)
4297 {
4298 	struct iwn_softc *sc = (struct iwn_softc *)arg;
4299 	ieee80211com_t *ic = &sc->sc_ic;
4300 
4301 	mutex_enter(&sc->sc_mtx);
4302 	if ((sc->sc_flags & IWN_FLAG_SCANNING) == 0) {
4303 		mutex_exit(&sc->sc_mtx);
		return;
	}
4304 
4305 	dev_err(sc->sc_dip, CE_WARN,
4306 	    "!aborting scan, flags = %x, state = %s",
4307 	    sc->sc_flags, ieee80211_state_name[ic->ic_state]);
4308 	sc->sc_flags &= ~IWN_FLAG_SCANNING;
4309 	iwn_hw_stop(sc, B_FALSE);
4310 	mutex_exit(&sc->sc_mtx);
4311 
4312 	sc->scan_to = 0;
4313 	(void) iwn_init(sc);
4314 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4315 }
4316 
4317 /*
4318  * periodic function to deal with RF switch and HW error recovery
4319  */
4320 static void
4321 iwn_periodic(void *arg)
4322 {
4323 	struct iwn_softc *sc = (struct iwn_softc *)arg;
4324 	ieee80211com_t	*ic = &sc->sc_ic;
4325 	int err;
4326 	uint32_t tmp;
4327 
4328 	mutex_enter(&sc->sc_mtx);
4329 	tmp = IWN_READ(sc, IWN_GP_CNTRL);
4330 	if (tmp & IWN_GP_CNTRL_RFKILL) {
4331 		sc->sc_flags &= ~IWN_FLAG_RADIO_OFF;
4332 	} else {
4333 		sc->sc_flags |= IWN_FLAG_RADIO_OFF;
4334 	}
4335 
4336 	/*
4337 	 * If the RF is OFF, do nothing.
4338 	 */
4339 	if (sc->sc_flags & IWN_FLAG_RADIO_OFF) {
4340 		mutex_exit(&sc->sc_mtx);
4341 		return;
4342 	}
4343 
4344 	mutex_exit(&sc->sc_mtx);
4345 
4346 	/*
4347 	 * recover from a fatal hardware/firmware error
4348 	 */
4349 	if (ic->ic_mach &&
4350 	    (sc->sc_flags & IWN_FLAG_HW_ERR_RECOVER)) {
4351 		dev_err(sc->sc_dip, CE_WARN,
4352 		    "!trying to restore previous state");
4353 
4354 		mutex_enter(&sc->sc_mtx);
4355 		sc->sc_flags |= IWN_FLAG_STOP_CALIB_TO;
4356 		mutex_exit(&sc->sc_mtx);
4357 
4358 		if (sc->calib_to != 0)
4359 			(void) untimeout(sc->calib_to);
4360 		sc->calib_to = 0;
4361 
4362 		if (sc->scan_to != 0)
4363 			(void) untimeout(sc->scan_to);
4364 		sc->scan_to = 0;
4365 
4366 		iwn_hw_stop(sc, B_TRUE);
4367 
4368 		if (IWN_CHK_FAST_RECOVER(sc)) {
4369 			/* save runtime configuration */
4370 			bcopy(&sc->rxon, &sc->rxon_save, sizeof (sc->rxon));
4371 		} else {
4372 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
4373 		}
4374 
4375 		err = iwn_init(sc);
4376 		if (err != IWN_SUCCESS)
4377 			return;
4378 
4379 		mutex_enter(&sc->sc_mtx);
4380 		sc->sc_flags |= IWN_FLAG_RUNNING;
4381 		mutex_exit(&sc->sc_mtx);
4382 
4383 		if (!IWN_CHK_FAST_RECOVER(sc) ||
4384 		    iwn_fast_recover(sc) != IWN_SUCCESS) {
4385 			mutex_enter(&sc->sc_mtx);
4386 			sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
4387 			mutex_exit(&sc->sc_mtx);
4388 			if (sc->sc_ostate != IEEE80211_S_INIT) {
4389 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
4390 			}
4391 		}
4392 	}
4393 }
4394 
4395 /*
4396  * Send a command to the firmware.
4397  */
4398 static int
4399 iwn_cmd(struct iwn_softc *sc, uint8_t code, void *buf, int size, int async)
4400 {
4401 	struct iwn_tx_ring *ring = &sc->txq[IWN_CMD_QUEUE_NUM];
4402 	struct iwn_tx_desc *desc;
4403 	struct iwn_tx_data *data;
4404 	struct iwn_tx_cmd *cmd;
4405 	clock_t clk;
4406 	uintptr_t paddr;
4407 	int totlen, ret;
4408 
4409 	ASSERT(mutex_owned(&sc->sc_mtx));
4410 
4411 	desc = &ring->desc[ring->cur];
4412 	data = &ring->data[ring->cur];
4413 	totlen = 4 + size;
4414 
4415 	if (size > sizeof (cmd->data)) {
4416 		/* Command is too large to fit in a descriptor. */
4417 		if (iwn_dma_contig_alloc(sc, &data->cmd_dma, totlen,
4418 		    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, (void **)&cmd,
4419 		    &iwn_dma_accattr, 1) != DDI_SUCCESS)
4420 			return ENOBUFS;
4421 		paddr = data->cmd_dma.paddr;
4422 	} else {
4423 		cmd = &ring->cmd[ring->cur];
4424 		paddr = data->cmd_paddr;
4425 	}
4426 
4427 	cmd->code = code;
4428 	cmd->flags = 0;
4429 	cmd->qid = ring->qid;
4430 	cmd->idx = ring->cur;
4431 	bzero(cmd->data, size);
4432 	memcpy(cmd->data, buf, size);
4433 
4434 	bzero(desc, sizeof(*desc));
4435 	desc->nsegs = 1;
4436 	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4437 	desc->segs[0].len  = htole16(IWN_HIADDR(paddr) | totlen << 4);
4438 
4439 	if (size > sizeof cmd->data) {
4440 		(void) ddi_dma_sync(data->cmd_dma.dma_hdl, 0, totlen,
4441 		    DDI_DMA_SYNC_FORDEV);
4442 	} else {
4443 		(void) ddi_dma_sync(ring->cmd_dma.dma_hdl,
4444 		    ring->cur * sizeof (*cmd),
4445 		    totlen, DDI_DMA_SYNC_FORDEV);
4446 	}
4447 	(void) ddi_dma_sync(ring->desc_dma.dma_hdl,
4448 	    ring->cur * sizeof (*desc),
4449 	    sizeof (*desc), DDI_DMA_SYNC_FORDEV);
4450 
4451 	/* Update TX scheduler. */
4452 	sc->ops.update_sched(sc, ring->qid, ring->cur, 0, 0);
4453 
4454 	/* Kick command ring. */
4455 	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4456 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4457 
4458 	if (async)
4459 		return (IWN_SUCCESS);
4460 
4461 	sc->sc_cmd_flag = SC_CMD_FLG_NONE;
4462 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
4463 	while (sc->sc_cmd_flag != SC_CMD_FLG_DONE)
4464 		if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_mtx, clk) < 0)
4465 			break;
4466 
4467 	ret = (sc->sc_cmd_flag == SC_CMD_FLG_DONE) ? IWN_SUCCESS : IWN_FAIL;
4468 	sc->sc_cmd_flag = SC_CMD_FLG_NONE;
4469 
4470 	return (ret);
4471 }
4472 
4473 static int
4474 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4475 {
4476 	struct iwn4965_node_info hnode;
4477 	char *src, *dst;
4478 
4479 	/*
4480 	 * We use the node structure for 5000 Series internally (it is
4481 	 * a superset of the one for 4965AGN). We thus copy the common
4482 	 * fields before sending the command.
4483 	 */
4484 	src = (char *)node;
4485 	dst = (char *)&hnode;
4486 	memcpy(dst, src, 48);
4487 	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4488 	memcpy(dst + 48, src + 72, 20);
4489 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4490 }
4491 
4492 static int
4493 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4494 {
4495 	/* Direct mapping. */
4496 	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4497 }
4498 
4499 static int
4500 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4501 {
4502 	struct iwn_node *wn = (void *)ni;
4503 	struct ieee80211_rateset *rs = &ni->in_rates;
4504 	struct iwn_cmd_link_quality linkq;
4505 	const struct iwn_rate *rinfo;
4506 	uint8_t txant;
4507 	int i, txrate;
4508 
4509 	/* Use the first valid TX antenna. */
4510 	txant = IWN_LSB(sc->txchainmask);
4511 
4512 	memset(&linkq, 0, sizeof linkq);
4513 	linkq.id = wn->id;
4514 	linkq.antmsk_1stream = txant;
4515 	linkq.antmsk_2stream = IWN_ANT_AB;
4516 	linkq.ampdu_max = 31;
4517 	linkq.ampdu_threshold = 3;
4518 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4519 
4520 	/* Start at highest available bit-rate. */
4521 	txrate = rs->ir_nrates - 1;
4522 	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4523 		rinfo = &iwn_rates[wn->ridx[txrate]];
4524 		linkq.retry[i].plcp = rinfo->plcp;
4525 		linkq.retry[i].rflags = rinfo->flags;
4526 		linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
4527 		/* Next retry at immediate lower bit-rate. */
4528 		if (txrate > 0)
4529 			txrate--;
4530 	}
4531 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4532 }
4533 
4534 /*
4535  * Broadcast node is used to send group-addressed and management frames.
4536  */
4537 static int
4538 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4539 {
4540 	struct iwn_ops *ops = &sc->ops;
4541 	struct iwn_node_info node;
4542 	struct iwn_cmd_link_quality linkq;
4543 	const struct iwn_rate *rinfo;
4544 	uint8_t txant;
4545 	int i, error;
4546 
4547 	memset(&node, 0, sizeof node);
4548 	IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
4549 	node.id = sc->broadcast_id;
4550 	DTRACE_PROBE(add__broadcast__node);
4551 	if ((error = ops->add_node(sc, &node, async)) != 0)
4552 		return error;
4553 
4554 	/* Use the first valid TX antenna. */
4555 	txant = IWN_LSB(sc->txchainmask);
4556 
4557 	memset(&linkq, 0, sizeof linkq);
4558 	linkq.id = sc->broadcast_id;
4559 	linkq.antmsk_1stream = txant;
4560 	linkq.antmsk_2stream = IWN_ANT_AB;
4561 	linkq.ampdu_max = 64;
4562 	linkq.ampdu_threshold = 3;
4563 	linkq.ampdu_limit = htole16(4000);	/* 4ms */
4564 
4565 	/* Use lowest mandatory bit-rate. */
4566 	rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ?
4567 	    &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6];
4568 	linkq.retry[0].plcp = rinfo->plcp;
4569 	linkq.retry[0].rflags = rinfo->flags;
4570 	linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
4571 	/* Use same bit-rate for all TX retries. */
4572 	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4573 		linkq.retry[i].plcp = linkq.retry[0].plcp;
4574 		linkq.retry[i].rflags = linkq.retry[0].rflags;
4575 	}
4576 	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4577 }
4578 
4579 static void
4580 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4581 {
4582 	struct iwn_cmd_led led;
4583 
4584 	/* Clear microcode LED ownership. */
4585 	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4586 
4587 	led.which = which;
4588 	led.unit = htole32(10000);	/* on/off in unit of 100ms */
4589 	led.off = off;
4590 	led.on = on;
4591 	DTRACE_PROBE1(led__change, const char *,
4592 	    (off != 0 && on != 0) ? "blinking" :
4593 	    (off != 0) ? "off" : "on");
4594 	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4595 }
4596 
4597 /*
4598  * Set the critical temperature at which the firmware will stop the radio
4599  * and notify us.
4600  */
4601 static int
4602 iwn_set_critical_temp(struct iwn_softc *sc)
4603 {
4604 	struct iwn_critical_temp crit;
4605 	int32_t temp;
4606 
4607 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4608 
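	/*
	 * The 5150 reports temperature on an inverted, offset scale (see
	 * iwn5000_get_temperature()), hence the conversion below.
	 */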
4609 	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4610 		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4611 	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4612 		temp = IWN_CTOK(110);
4613 	else
4614 		temp = 110;
4615 
4616 	sc->sc_misc->crit_temp.value.ul = temp;
4617 
4618 	memset(&crit, 0, sizeof crit);
4619 	crit.tempR = htole32(temp);
4620 	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4621 }
4622 
4623 static int
4624 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4625 {
4626 	struct iwn_cmd_timing cmd;
4627 	uint64_t val, mod;
4628 
4629 	memset(&cmd, 0, sizeof cmd);
4630 	memcpy(&cmd.tstamp, ni->in_tstamp.data, sizeof (uint64_t));
4631 	cmd.bintval = htole16(ni->in_intval);
4632 	cmd.lintval = htole16(10);
4633 
4634 	/* Compute remaining time until next beacon. */
4635 	val = (uint64_t)ni->in_intval * 1024;	/* TU -> usecs */
4636 	mod = le64toh(cmd.tstamp) % val;
4637 	cmd.binitval = htole32((uint32_t)(val - mod));
4638 
4639 	sc->sc_timing->bintval.value.ul = ni->in_intval;
4640 	sc->sc_timing->tstamp.value.ul = ni->in_tstamp.tsf;
4641 	sc->sc_timing->init.value.ul = (uint32_t)(val - mod);
4642 
4643 	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4644 }
4645 
4646 static void
4647 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4648 {
4649 	/* Adjust TX power if need be (delta >= 3 degC). */
4650 	IWN_DBG("temperature %d->%d", sc->temp, temp);
4651 	if (abs(temp - sc->temp) >= 3) {
4652 		/* Record temperature of last calibration. */
4653 		sc->temp = temp;
4654 		(void)iwn4965_set_txpower(sc, 1);
4655 	}
4656 }
4657 
4658 /*
4659  * Set TX power for current channel (each rate has its own power settings).
4660  * This function takes into account the regulatory information from EEPROM,
4661  * the current temperature and the current voltage.
4662  */
4663 static int
4664 iwn4965_set_txpower(struct iwn_softc *sc, int async)
4665 {
4666 /* Fixed-point arithmetic division using a n-bit fractional part. */
4667 #define fdivround(a, b, n)	\
4668 	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4669 /* Linear interpolation. */
4670 #define interpolate(x, x1, y1, x2, y2, n)	\
4671 	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
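/*
 * interpolate() evaluates the straight line through (x1,y1) and (x2,y2) at x,
 * using fdivround() for a rounded fixed-point division with an n-bit
 * fractional part.  It is used below to interpolate between the two EEPROM
 * calibration samples of the selected sub-band.
 */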
4672 
4673 	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4674 	struct ieee80211com *ic = &sc->sc_ic;
4675 	struct iwn_ucode_info *uc = &sc->ucode_info;
4676 	struct ieee80211_channel *ch;
4677 	struct iwn4965_cmd_txpower cmd;
4678 	struct iwn4965_eeprom_chan_samples *chans;
4679 	const uint8_t *rf_gain, *dsp_gain;
4680 	int32_t vdiff, tdiff;
4681 	int i, c, grp, maxpwr;
4682 	uint8_t chan;
4683 
4684 	/* Retrieve current channel from last RXON. */
4685 	chan = sc->rxon.chan;
4686 	sc->sc_txpower->chan.value.l = chan;
4687 	ch = &ic->ic_sup_channels[chan];
4688 
4689 	memset(&cmd, 0, sizeof cmd);
4690 	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4691 	cmd.chan = chan;
4692 
4693 	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4694 		maxpwr   = sc->maxpwr5GHz;
4695 		rf_gain  = iwn4965_rf_gain_5ghz;
4696 		dsp_gain = iwn4965_dsp_gain_5ghz;
4697 	} else {
4698 		maxpwr   = sc->maxpwr2GHz;
4699 		rf_gain  = iwn4965_rf_gain_2ghz;
4700 		dsp_gain = iwn4965_dsp_gain_2ghz;
4701 	}
4702 
4703 	/* Compute voltage compensation. */
4704 	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4705 	if (vdiff > 0)
4706 		vdiff *= 2;
4707 	if (abs(vdiff) > 2)
4708 		vdiff = 0;
4709 	sc->sc_txpower->vdiff.value.l = vdiff;
4710 
4711 	/* Get channel attenuation group. */
4712 	if (chan <= 20)		/* 1-20 */
4713 		grp = 4;
4714 	else if (chan <= 43)	/* 34-43 */
4715 		grp = 0;
4716 	else if (chan <= 70)	/* 44-70 */
4717 		grp = 1;
4718 	else if (chan <= 124)	/* 71-124 */
4719 		grp = 2;
4720 	else			/* 125-200 */
4721 		grp = 3;
4722 	sc->sc_txpower->group.value.l = grp;
4723 
4724 	/* Get channel sub-band. */
4725 	for (i = 0; i < IWN_NBANDS; i++)
4726 		if (sc->bands[i].lo != 0 &&
4727 		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4728 			break;
4729 	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
4730 		return EINVAL;
4731 	chans = sc->bands[i].chans;
4732 	sc->sc_txpower->subband.value.l = i;
4733 
4734 	for (c = 0; c < 2; c++) {
4735 		uint8_t power, gain, temp;
4736 		int maxchpwr, pwr, ridx, idx;
4737 
4738 		power = interpolate(chan,
4739 		    chans[0].num, chans[0].samples[c][1].power,
4740 		    chans[1].num, chans[1].samples[c][1].power, 1);
4741 		gain  = interpolate(chan,
4742 		    chans[0].num, chans[0].samples[c][1].gain,
4743 		    chans[1].num, chans[1].samples[c][1].gain, 1);
4744 		temp  = interpolate(chan,
4745 		    chans[0].num, chans[0].samples[c][1].temp,
4746 		    chans[1].num, chans[1].samples[c][1].temp, 1);
4747 		sc->sc_txpower->txchain[c].power.value.l = power;
4748 		sc->sc_txpower->txchain[c].gain.value.l = gain;
4749 		sc->sc_txpower->txchain[c].temp.value.l = temp;
4750 
4751 		/* Compute temperature compensation. */
4752 		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4753 		sc->sc_txpower->txchain[c].tcomp.value.l = tdiff;
4754 
4755 		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4756 			/* Convert dBm to half-dBm. */
4757 			maxchpwr = sc->maxpwr[chan] * 2;
4758 			if ((ridx / 8) & 1)
4759 				maxchpwr -= 6;	/* MIMO 2T: -3dB */
4760 
4761 			pwr = maxpwr;
4762 
4763 			/* Adjust TX power based on rate. */
4764 			if ((ridx % 8) == 5)
4765 				pwr -= 15;	/* OFDM48: -7.5dB */
4766 			else if ((ridx % 8) == 6)
4767 				pwr -= 17;	/* OFDM54: -8.5dB */
4768 			else if ((ridx % 8) == 7)
4769 				pwr -= 20;	/* OFDM60: -10dB */
4770 			else
4771 				pwr -= 10;	/* Others: -5dB */
4772 
4773 			/* Do not exceed channel max TX power. */
4774 			if (pwr > maxchpwr)
4775 				pwr = maxchpwr;
4776 
4777 			idx = gain - (pwr - power) - tdiff - vdiff;
4778 			if ((ridx / 8) & 1)	/* MIMO */
4779 				idx += (int32_t)le32toh(uc->atten[grp][c]);
4780 
4781 			if (cmd.band == 0)
4782 				idx += 9;	/* 5GHz */
4783 			if (ridx == IWN_RIDX_MAX)
4784 				idx += 5;	/* CCK */
4785 
4786 			/* Make sure idx stays in a valid range. */
4787 			if (idx < 0)
4788 				idx = 0;
4789 			else if (idx > IWN4965_MAX_PWR_INDEX)
4790 				idx = IWN4965_MAX_PWR_INDEX;
4791 
4792 			sc->sc_txpower->txchain[c].rate[ridx].rf_gain.value.l =
4793 			    cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4794 			sc->sc_txpower->txchain[c].rate[ridx].dsp_gain.value.l =
4795 			    cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4796 		}
4797 	}
4798 
4799 	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4800 
4801 #undef interpolate
4802 #undef fdivround
4803 }
4804 
4805 static int
4806 iwn5000_set_txpower(struct iwn_softc *sc, int async)
4807 {
4808 	struct iwn5000_cmd_txpower cmd;
4809 
4810 	/*
4811 	 * TX power calibration is handled automatically by the firmware
4812 	 * for 5000 Series.
4813 	 */
4814 	memset(&cmd, 0, sizeof cmd);
4815 	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
4816 	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4817 	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4818 	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4819 }
4820 
4821 /*
4822  * Retrieve the maximum RSSI (in dBm) among receivers.
4823  */
4824 static int
4825 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
4826 {
4827 	const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf;
4828 	uint8_t mask, agc;
4829 	int rssi;
4830 
4831 	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4832 	agc  = (le16toh(phy->agc) >> 7) & 0x7f;
4833 
4834 	rssi = 0;
4835 	if (mask & IWN_ANT_A)
4836 		rssi = MAX(rssi, phy->rssi[0]);
4837 	if (mask & IWN_ANT_B)
4838 		rssi = MAX(rssi, phy->rssi[2]);
4839 	if (mask & IWN_ANT_C)
4840 		rssi = MAX(rssi, phy->rssi[4]);
4841 
4842 	return rssi - agc - IWN_RSSI_TO_DBM;
4843 }
4844 
4845 static int
4846 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
4847 {
4848 	const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf;
4849 	uint8_t agc;
4850 	int rssi;
4851 
4852 	agc = (le32toh(phy->agc) >> 9) & 0x7f;
4853 
4854 	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4855 		   le16toh(phy->rssi[1]) & 0xff);
4856 	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4857 
4858 	return rssi - agc - IWN_RSSI_TO_DBM;
4859 }
4860 
4861 /*
4862  * Retrieve the average noise (in dBm) among receivers.
4863  */
4864 static int
4865 iwn_get_noise(const struct iwn_rx_general_stats *stats)
4866 {
4867 	int i, total, nbant, noise;
4868 
4869 	total = nbant = 0;
4870 	for (i = 0; i < 3; i++) {
4871 		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4872 			continue;
4873 		total += noise;
4874 		nbant++;
4875 	}
4876 	/* There should be at least one antenna but check anyway. */
4877 	return (nbant == 0) ? -127 : (total / nbant) - 107;
4878 }
4879 
4880 /*
4881  * Compute temperature (in degC) from last received statistics.
4882  */
4883 static int
4884 iwn4965_get_temperature(struct iwn_softc *sc)
4885 {
4886 	struct iwn_ucode_info *uc = &sc->ucode_info;
4887 	int32_t r1, r2, r3, r4, temp;
4888 
4889 	r1 = le32toh(uc->temp[0].chan20MHz);
4890 	r2 = le32toh(uc->temp[1].chan20MHz);
4891 	r3 = le32toh(uc->temp[2].chan20MHz);
4892 	r4 = le32toh(sc->rawtemp);
4893 
4894 	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
4895 		return 0;
4896 
4897 	/* Sign-extend 23-bit R4 value to 32-bit. */
4898 	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4899 	/* Compute temperature in Kelvin. */
4900 	temp = (259 * (r4 - r2)) / (r3 - r1);
4901 	temp = (temp * 97) / 100 + 8;
4902 
4903 	return IWN_KTOC(temp);
4904 }
4905 
4906 static int
4907 iwn5000_get_temperature(struct iwn_softc *sc)
4908 {
4909 	int32_t temp;
4910 
4911 	/*
4912 	 * Temperature is not used by the driver for 5000 Series because
4913 	 * TX power calibration is handled by firmware.  We export it to
4914 	 * users through a kstat though.
4915 	 */
4916 	temp = le32toh(sc->rawtemp);
4917 	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4918 		temp = (temp / -5) + sc->temp_off;
4919 		temp = IWN_KTOC(temp);
4920 	}
4921 	return temp;
4922 }
4923 
4924 /*
4925  * Initialize sensitivity calibration state machine.
4926  */
4927 static int
4928 iwn_init_sensitivity(struct iwn_softc *sc)
4929 {
4930 	struct iwn_ops *ops = &sc->ops;
4931 	struct iwn_calib_state *calib = &sc->calib;
4932 	uint32_t flags;
4933 	int error;
4934 
4935 	/* Reset calibration state machine. */
4936 	memset(calib, 0, sizeof (*calib));
4937 	calib->state = IWN_CALIB_STATE_INIT;
4938 	calib->cck_state = IWN_CCK_STATE_HIFA;
4939 	/* Set initial correlation values. */
4940 	calib->ofdm_x1     = sc->limits->min_ofdm_x1;
4941 	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4942 	calib->ofdm_x4     = sc->limits->min_ofdm_x4;
4943 	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4944 	calib->cck_x4      = 125;
4945 	calib->cck_mrc_x4  = sc->limits->min_cck_mrc_x4;
4946 	calib->energy_cck  = sc->limits->energy_cck;
4947 
4948 	/* Write initial sensitivity. */
4949 	if ((error = iwn_send_sensitivity(sc)) != 0)
4950 		return error;
4951 
4952 	/* Write initial gains. */
4953 	if ((error = ops->init_gains(sc)) != 0)
4954 		return error;
4955 
4956 	/* Request statistics at each beacon interval. */
4957 	flags = 0;
4958 	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4959 }
4960 
4961 /*
4962  * Collect noise and RSSI statistics for the first 20 beacons received
4963  * after association and use them to determine connected antennas and
4964  * to set differential gains.
4965  */
4966 static void
4967 iwn_collect_noise(struct iwn_softc *sc,
4968     const struct iwn_rx_general_stats *stats)
4969 {
4970 	struct iwn_ops *ops = &sc->ops;
4971 	struct iwn_calib_state *calib = &sc->calib;
4972 	uint32_t val;
4973 	int i;
4974 
4975 	/* Accumulate RSSI and noise for all 3 antennas. */
4976 	for (i = 0; i < 3; i++) {
4977 		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4978 		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4979 	}
4980 	/* NB: We update differential gains only once after 20 beacons. */
4981 	if (++calib->nbeacons < 20)
4982 		return;
4983 
4984 	/* Determine highest average RSSI. */
4985 	val = MAX(calib->rssi[0], calib->rssi[1]);
4986 	val = MAX(calib->rssi[2], val);
4987 
4988 	/* Determine which antennas are connected. */
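	/*
	 * RSSI was accumulated over 20 beacons above, so an antenna whose
	 * average RSSI is more than 15 units below the strongest one is
	 * considered disconnected.
	 */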
4989 	sc->chainmask = sc->rxchainmask;
4990 	for (i = 0; i < 3; i++)
4991 		if (val - calib->rssi[i] > 15 * 20)
4992 			sc->chainmask &= ~(1 << i);
4993 
4994 	sc->sc_ant->conn_ant.value.ul = sc->chainmask;
4995 
4996 	/* If none of the TX antennas are connected, keep at least one. */
4997 	if ((sc->chainmask & sc->txchainmask) == 0)
4998 		sc->chainmask |= IWN_LSB(sc->txchainmask);
4999 
5000 	(void)ops->set_gains(sc);
5001 	calib->state = IWN_CALIB_STATE_RUN;
5002 
5003 #ifdef notyet
5004 	/* XXX Disable RX chains with no antennas connected. */
5005 	sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
5006 	DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5007 	(void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5008 #endif
5009 
5010 	/* Enable power-saving mode if requested by user. */
5011 	if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
5012 		(void)iwn_set_pslevel(sc, 0, 3, 1);
5013 }
5014 
5015 static int
5016 iwn4965_init_gains(struct iwn_softc *sc)
5017 {
5018 	struct iwn_phy_calib_gain cmd;
5019 
5020 	memset(&cmd, 0, sizeof cmd);
5021 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5022 	/* Differential gains initially set to 0 for all 3 antennas. */
5023 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5024 }
5025 
5026 static int
5027 iwn5000_init_gains(struct iwn_softc *sc)
5028 {
5029 	struct iwn_phy_calib cmd;
5030 
5031 	memset(&cmd, 0, sizeof cmd);
5032 	cmd.code = sc->reset_noise_gain;
5033 	cmd.ngroups = 1;
5034 	cmd.isvalid = 1;
5035 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5036 }
5037 
5038 static int
5039 iwn4965_set_gains(struct iwn_softc *sc)
5040 {
5041 	struct iwn_calib_state *calib = &sc->calib;
5042 	struct iwn_phy_calib_gain cmd;
5043 	int i, delta, noise;
5044 
5045 	/* Get minimal noise among connected antennas. */
5046 	noise = INT_MAX;	/* NB: There's at least one antenna. */
5047 	for (i = 0; i < 3; i++)
5048 		if (sc->chainmask & (1 << i))
5049 			noise = MIN(calib->noise[i], noise);
5050 
5051 	memset(&cmd, 0, sizeof cmd);
5052 	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5053 	/* Set differential gains for connected antennas. */
5054 	for (i = 0; i < 3; i++) {
5055 		if (sc->chainmask & (1 << i)) {
5056 			/* Compute attenuation (in unit of 1.5dB). */
5057 			delta = (noise - calib->noise[i]) / 30;
5058 			/* NB: delta <= 0 */
5059 			/* Limit to [-4.5dB,0]. */
5060 			cmd.gain[i] = (uint8_t)MIN(abs(delta), 3);
5061 			if (delta < 0)
5062 				cmd.gain[i] |= 1 << 2;	/* sign bit */
5063 			sc->sc_ant->gain[i].value.ul = cmd.gain[i];
5064 		}
5065 	}
5066 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5067 }
5068 
5069 static int
5070 iwn5000_set_gains(struct iwn_softc *sc)
5071 {
5072 	struct iwn_calib_state *calib = &sc->calib;
5073 	struct iwn_phy_calib_gain cmd;
5074 	int i, ant, div, delta;
5075 
5076 	/* Noise was collected over 20 beacons; non-6050 parts need a 1.5 factor. */
5077 	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5078 
5079 	memset(&cmd, 0, sizeof cmd);
5080 	cmd.code = sc->noise_gain;
5081 	cmd.ngroups = 1;
5082 	cmd.isvalid = 1;
5083 	/* Get first available RX antenna as referential. */
5084 	ant = IWN_LSB(sc->rxchainmask);
5085 	/* Set differential gains for other antennas. */
5086 	for (i = ant + 1; i < 3; i++) {
5087 		if (sc->chainmask & (1 << i)) {
5088 			/* The delta is relative to antenna "ant". */
5089 			delta = (calib->noise[ant] - calib->noise[i]) / div;
5090 			/* Limit to [-4.5dB,+4.5dB]. */
5091 			cmd.gain[i - 1] = (uint8_t)MIN(abs(delta), 3);
5092 			if (delta < 0)
5093 				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
5094 			sc->sc_ant->gain[i - 1].value.ul
5095 			    = cmd.gain[i - 1];
5096 		}
5097 	}
5098 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5099 }
5100 
5101 /*
5102  * Tune RF RX sensitivity based on the number of false alarms detected
5103  * during the last beacon period.
5104  */
5105 static void
5106 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5107 {
5108 #define inc(val, inc, max)			\
5109 	if ((val) < (max)) {			\
5110 		if ((val) < (max) - (inc))	\
5111 			(val) += (inc);		\
5112 		else				\
5113 			(val) = (max);		\
5114 		needs_update = 1;		\
5115 	}
5116 #define dec(val, dec, min)			\
5117 	if ((val) > (min)) {			\
5118 		if ((val) > (min) + (dec))	\
5119 			(val) -= (dec);		\
5120 		else				\
5121 			(val) = (min);		\
5122 		needs_update = 1;		\
5123 	}
5124 
5125 	const struct iwn_sensitivity_limits *limits = sc->limits;
5126 	struct iwn_calib_state *calib = &sc->calib;
5127 	uint32_t val, rxena, fa;
5128 	uint32_t energy[3], energy_min;
5129 	uint8_t noise[3], noise_ref;
5130 	int i, needs_update = 0;
5131 
5132 	/* Check that we've been enabled long enough. */
5133 	if ((rxena = le32toh(stats->general.load)) == 0)
5134 		return;
5135 
5136 	/* Compute number of false alarms since last call for OFDM. */
5137 	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
5138 	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
5139 	fa *= 200 * 1024;	/* 200TU */
5140 
5141 	/* Save counters values for next call. */
5142 	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
5143 	calib->fa_ofdm = le32toh(stats->ofdm.fa);
5144 
5145 	if (fa > 50 * rxena) {
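	/*
	 * fa was scaled to a 200 TU window above while rxena is the time
	 * the receiver was enabled, so these tests roughly compare the
	 * false-alarm rate against fixed thresholds (50 and 5 per 200 TU).
	 */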
5146 		/* High false alarm count, decrease sensitivity. */
5147 		IWN_DBG("OFDM high false alarm count: %u", fa);
5148 		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
5149 		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
5150 		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
5151 		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
5152 
5153 	} else if (fa < 5 * rxena) {
5154 		/* Low false alarm count, increase sensitivity. */
5155 		IWN_DBG("OFDM low false alarm count: %u", fa);
5156 		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
5157 		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
5158 		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
5159 		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
5160 	}
5161 
5162 	/* Compute maximum noise among 3 receivers. */
5163 	for (i = 0; i < 3; i++)
5164 		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
5165 	val = MAX(noise[0], noise[1]);
5166 	val = MAX(noise[2], val);
5167 	/* Insert it into our samples table. */
5168 	calib->noise_samples[calib->cur_noise_sample] = (uint8_t)val;
5169 	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
5170 
5171 	/* Compute maximum noise among last 20 samples. */
5172 	noise_ref = calib->noise_samples[0];
5173 	for (i = 1; i < 20; i++)
5174 		noise_ref = MAX(noise_ref, calib->noise_samples[i]);
5175 
5176 	/* Compute maximum energy among 3 receivers. */
5177 	for (i = 0; i < 3; i++)
5178 		energy[i] = le32toh(stats->general.energy[i]);
5179 	val = MIN(energy[0], energy[1]);
5180 	val = MIN(energy[2], val);
5181 	/* Insert it into our samples table. */
5182 	calib->energy_samples[calib->cur_energy_sample] = val;
5183 	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
5184 
5185 	/* Compute minimum energy among last 10 samples. */
5186 	energy_min = calib->energy_samples[0];
5187 	for (i = 1; i < 10; i++)
5188 		energy_min = MAX(energy_min, calib->energy_samples[i]);
5189 	energy_min += 6;
5190 
5191 	/* Compute number of false alarms since last call for CCK. */
5192 	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
5193 	fa += le32toh(stats->cck.fa) - calib->fa_cck;
5194 	fa *= 200 * 1024;	/* 200TU */
5195 
5196 	/* Save counters values for next call. */
5197 	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
5198 	calib->fa_cck = le32toh(stats->cck.fa);
5199 
5200 	if (fa > 50 * rxena) {
5201 		/* High false alarm count, decrease sensitivity. */
5202 		IWN_DBG("CCK high false alarm count: %u", fa);
5203 		calib->cck_state = IWN_CCK_STATE_HIFA;
5204 		calib->low_fa = 0;
5205 
5206 		if (calib->cck_x4 > 160) {
5207 			calib->noise_ref = noise_ref;
5208 			if (calib->energy_cck > 2)
5209 				dec(calib->energy_cck, 2, energy_min);
5210 		}
5211 		if (calib->cck_x4 < 160) {
5212 			calib->cck_x4 = 161;
5213 			needs_update = 1;
5214 		} else
5215 			inc(calib->cck_x4, 3, limits->max_cck_x4);
5216 
5217 		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
5218 
5219 	} else if (fa < 5 * rxena) {
5220 		/* Low false alarm count, increase sensitivity. */
5221 		IWN_DBG("CCK low false alarm count: %u", fa);
5222 		calib->cck_state = IWN_CCK_STATE_LOFA;
5223 		calib->low_fa++;
5224 
5225 		if (calib->cck_state != IWN_CCK_STATE_INIT &&
5226 		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
5227 		     calib->low_fa > 100)) {
5228 			inc(calib->energy_cck, 2, limits->min_energy_cck);
5229 			dec(calib->cck_x4,     3, limits->min_cck_x4);
5230 			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
5231 		}
5232 	} else {
5233 		/* Not worth increasing or decreasing sensitivity. */
5234 		IWN_DBG("CCK normal false alarm count: %u", fa);
5235 		calib->low_fa = 0;
5236 		calib->noise_ref = noise_ref;
5237 
5238 		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
5239 			/* Previous interval had many false alarms. */
5240 			dec(calib->energy_cck, 8, energy_min);
5241 		}
5242 		calib->cck_state = IWN_CCK_STATE_INIT;
5243 	}
5244 
5245 	if (needs_update)
5246 		(void)iwn_send_sensitivity(sc);
5247 #undef dec
5248 #undef inc
5249 }
5250 
5251 static int
5252 iwn_send_sensitivity(struct iwn_softc *sc)
5253 {
5254 	struct iwn_calib_state *calib = &sc->calib;
5255 	struct iwn_enhanced_sensitivity_cmd cmd;
5256 	int len;
5257 
5258 	memset(&cmd, 0, sizeof cmd);
5259 	len = sizeof (struct iwn_sensitivity_cmd);
5260 	cmd.which = IWN_SENSITIVITY_WORKTBL;
5261 	/* OFDM modulation. */
5262 	cmd.corr_ofdm_x1     = htole16(calib->ofdm_x1);
5263 	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
5264 	cmd.corr_ofdm_x4     = htole16(calib->ofdm_x4);
5265 	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
5266 	cmd.energy_ofdm      = htole16(sc->limits->energy_ofdm);
5267 	cmd.energy_ofdm_th   = htole16(62);
5268 	/* CCK modulation. */
5269 	cmd.corr_cck_x4      = htole16(calib->cck_x4);
5270 	cmd.corr_cck_mrc_x4  = htole16(calib->cck_mrc_x4);
5271 	cmd.energy_cck       = htole16(calib->energy_cck);
5272 	/* Barker modulation: use default values. */
5273 	cmd.corr_barker      = htole16(190);
5274 	cmd.corr_barker_mrc  = htole16(390);
5275 	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
5276 		goto send;
5277 	/* Enhanced sensitivity settings. */
5278 	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
5279 	cmd.ofdm_det_slope_mrc = htole16(668);
5280 	cmd.ofdm_det_icept_mrc = htole16(4);
5281 	cmd.ofdm_det_slope     = htole16(486);
5282 	cmd.ofdm_det_icept     = htole16(37);
5283 	cmd.cck_det_slope_mrc  = htole16(853);
5284 	cmd.cck_det_icept_mrc  = htole16(4);
5285 	cmd.cck_det_slope      = htole16(476);
5286 	cmd.cck_det_icept      = htole16(99);
5287 send:
5288 
5289 	sc->sc_sens->ofdm_x1.value.ul = calib->ofdm_x1;
5290 	sc->sc_sens->ofdm_mrc_x1.value.ul = calib->ofdm_mrc_x1;
5291 	sc->sc_sens->ofdm_x4.value.ul = calib->ofdm_x4;
5292 	sc->sc_sens->ofdm_mrc_x4.value.ul = calib->ofdm_mrc_x4;
5293 	sc->sc_sens->cck_x4.value.ul = calib->cck_x4;
5294 	sc->sc_sens->cck_mrc_x4.value.ul = calib->cck_mrc_x4;
5295 	sc->sc_sens->energy_cck.value.ul = calib->energy_cck;
5296 
5297 	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
5298 }
5299 
5300 /*
5301  * Set STA mode power saving level (between 0 and 5).
5302  * Level 0 is CAM (Continuously Aware Mode), level 5 is maximum power saving.
5303  */
5304 static int
5305 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
5306 {
5307 	struct iwn_pmgt_cmd cmd;
5308 	const struct iwn_pmgt *pmgt;
5309 	uint32_t maxp, skip_dtim;
5310 	uint32_t reg;
5311 	int i;
5312 
5313 	/* Select which PS parameters to use. */
5314 	if (dtim <= 2)
5315 		pmgt = &iwn_pmgt[0][level];
5316 	else if (dtim <= 10)
5317 		pmgt = &iwn_pmgt[1][level];
5318 	else
5319 		pmgt = &iwn_pmgt[2][level];
5320 
5321 	memset(&cmd, 0, sizeof cmd);
5322 	if (level != 0)	/* not CAM */
5323 		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
5324 	if (level == 5)
5325 		cmd.flags |= htole16(IWN_PS_FAST_PD);
5326 	/* Retrieve PCIe Active State Power Management (ASPM). */
5327 	reg = pci_config_get32(sc->sc_pcih,
5328 	    sc->sc_cap_off + PCIE_LINKCTL);
5329 	if (!(reg & PCIE_LINKCTL_ASPM_CTL_L0S)) /* L0s Entry disabled. */
5330 		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
5331 	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
5332 	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
5333 
5334 	if (dtim == 0) {
5335 		dtim = 1;
5336 		skip_dtim = 0;
5337 	} else
5338 		skip_dtim = pmgt->skip_dtim;
5339 	if (skip_dtim != 0) {
5340 		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
5341 		maxp = pmgt->intval[4];
5342 		if (maxp == (uint32_t)-1)
5343 			maxp = dtim * (skip_dtim + 1);
5344 		else if (maxp > dtim)
5345 			maxp = (maxp / dtim) * dtim;
5346 	} else
5347 		maxp = dtim;
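	/* Clamp each of the five sleep intervals to the computed maximum. */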
5348 	for (i = 0; i < 5; i++)
5349 		cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i]));
5350 
5351 	sc->sc_misc->pslevel.value.ul = level;
5352 	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
5353 }
5354 
5355 int
5356 iwn5000_runtime_calib(struct iwn_softc *sc)
5357 {
5358 	struct iwn5000_calib_config cmd;
5359 
5360 	memset(&cmd, 0, sizeof cmd);
5361 	cmd.ucode.once.enable = 0xffffffff;
5362 	cmd.ucode.once.start = IWN5000_CALIB_DC;
5363 	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5364 }
5365 
5366 static int
5367 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc)
5368 {
5369 	struct iwn_bluetooth bluetooth;
5370 
5371 	memset(&bluetooth, 0, sizeof bluetooth);
5372 	bluetooth.flags = IWN_BT_COEX_ENABLE;
5373 	bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
5374 	bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
5375 
5376 	return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
5377 }
5378 
5379 static int
5380 iwn_config_bt_coex_prio_table(struct iwn_softc *sc)
5381 {
5382 	uint8_t prio_table[16];
5383 
5384 	memset(&prio_table, 0, sizeof prio_table);
5385 	prio_table[ 0] =  6;	/* init calibration 1		*/
5386 	prio_table[ 1] =  7;	/* init calibration 2		*/
5387 	prio_table[ 2] =  2;	/* periodic calib low 1		*/
5388 	prio_table[ 3] =  3;	/* periodic calib low 2		*/
5389 	prio_table[ 4] =  4;	/* periodic calib high 1	*/
5390 	prio_table[ 5] =  5;	/* periodic calib high 2	*/
5391 	prio_table[ 6] =  6;	/* dtim				*/
5392 	prio_table[ 7] =  8;	/* scan52			*/
5393 	prio_table[ 8] = 10;	/* scan24			*/
5394 
5395 	return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE,
5396 	               &prio_table, sizeof prio_table, 0);
5397 }
5398 
5399 static int
5400 iwn_config_bt_coex_adv_config(struct iwn_softc *sc, struct iwn_bt_basic *basic,
5401     size_t len)
5402 {
5403 	struct iwn_btcoex_prot btprot;
5404 	int error;
5405 
5406 	basic->bt.flags = IWN_BT_COEX_ENABLE;
5407 	basic->bt.lead_time = IWN_BT_LEAD_TIME_DEF;
5408 	basic->bt.max_kill = IWN_BT_MAX_KILL_DEF;
5409 	basic->bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF;
5410 	basic->bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF;
5411 	basic->bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF;
5412 	basic->bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF;
5413 	basic->bt3_timer_t2_value = IWN_BT_BT3_T2_DEF;
5414 	basic->bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */
5415 	basic->bt3_lookup_table[ 1] = htole32(0xaaaaaaaa);
5416 	basic->bt3_lookup_table[ 2] = htole32(0xaeaaaaaa);
5417 	basic->bt3_lookup_table[ 3] = htole32(0xaaaaaaaa);
5418 	basic->bt3_lookup_table[ 4] = htole32(0xcc00ff28);
5419 	basic->bt3_lookup_table[ 5] = htole32(0x0000aaaa);
5420 	basic->bt3_lookup_table[ 6] = htole32(0xcc00aaaa);
5421 	basic->bt3_lookup_table[ 7] = htole32(0x0000aaaa);
5422 	basic->bt3_lookup_table[ 8] = htole32(0xc0004000);
5423 	basic->bt3_lookup_table[ 9] = htole32(0x00004000);
5424 	basic->bt3_lookup_table[10] = htole32(0xf0005000);
5425 	basic->bt3_lookup_table[11] = htole32(0xf0005000);
5426 	basic->reduce_txpower = 0; /* as not implemented */
5427 	basic->valid = IWN_BT_ALL_VALID_MASK;
5428 
5429 	error = iwn_cmd(sc, IWN_CMD_BT_COEX, basic, len, 0);
5430 	if (error != 0) {
5431 		dev_err(sc->sc_dip, CE_WARN,
5432 		    "!could not configure advanced bluetooth coexistence");
5433 		return error;
5434 	}
5435 
5436 	error = iwn_config_bt_coex_prio_table(sc);
5437 	if (error != 0) {
5438 		dev_err(sc->sc_dip, CE_WARN,
5439 		    "!could not configure BT priority table");
5440 		return error;
5441 	}
5442 
5443 	/* Force BT state machine change */
5444 	memset(&btprot, 0, sizeof btprot);
5445 	btprot.open = 1;
5446 	btprot.type = 1;
5447 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1);
5448 	if (error != 0) {
5449 		dev_err(sc->sc_dip, CE_WARN, "!could not open BT protocol");
5450 		return error;
5451 	}
5452 
5453 	btprot.open = 0;
5454 	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof btprot, 1);
5455 	if (error != 0) {
5456 		dev_err(sc->sc_dip, CE_WARN, "!could not close BT protocol");
5457 		return error;
5458 	}
5459 	return 0;
5460 }
5461 
5462 static int
5463 iwn_config_bt_coex_adv1(struct iwn_softc *sc)
5464 {
5465 	struct iwn_bt_adv1 d;
5466 
5467 	memset(&d, 0, sizeof d);
5468 	d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
5469 	d.tx_prio_boost = 0;
5470 	d.rx_prio_boost = 0;
5471 	return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d);
5472 }
5473 
5474 static int
5475 iwn_config_bt_coex_adv2(struct iwn_softc *sc)
5476 {
5477 	struct iwn_bt_adv2 d;
5478 
5479 	memset(&d, 0, sizeof d);
5480 	d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
5481 	d.tx_prio_boost = 0;
5482 	d.rx_prio_boost = 0;
5483 	return iwn_config_bt_coex_adv_config(sc, &d.basic, sizeof d);
5484 }
5485 
5486 static int
5487 iwn_config(struct iwn_softc *sc)
5488 {
5489 	struct iwn_ops *ops = &sc->ops;
5490 	struct ieee80211com *ic = &sc->sc_ic;
5491 	uint32_t txmask;
5492 	uint16_t rxchain;
5493 	int error;
5494 
5495 	error = ops->config_bt_coex(sc);
5496 	if (error != 0) {
5497 		dev_err(sc->sc_dip, CE_WARN,
5498 		    "!could not configure bluetooth coexistence");
5499 		return error;
5500 	}
5501 
5502 	/* Set radio temperature sensor offset. */
5503 	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5504 		error = iwn6000_temp_offset_calib(sc);
5505 		if (error != 0) {
5506 			dev_err(sc->sc_dip, CE_WARN,
5507 			    "!could not set temperature offset");
5508 			return error;
5509 		}
5510 	}
5511 
5512 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
5513 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
5514 	    sc->hw_type == IWN_HW_REV_TYPE_135  ||
5515 	    sc->hw_type == IWN_HW_REV_TYPE_105) {
5516 		error = iwn2000_temp_offset_calib(sc);
5517 		if (error != 0) {
5518 			dev_err(sc->sc_dip, CE_WARN,
5519 			    "!could not set temperature offset");
5520 			return error;
5521 		}
5522 	}
5523 
5524 	if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5525 	    sc->hw_type == IWN_HW_REV_TYPE_6005) {
5526 		/* Configure runtime DC calibration. */
5527 		error = iwn5000_runtime_calib(sc);
5528 		if (error != 0) {
5529 			dev_err(sc->sc_dip, CE_WARN,
5530 			    "!could not configure runtime calibration");
5531 			return error;
5532 		}
5533 	}
5534 
5535 	/* Configure valid TX chains for 5000 Series. */
5536 	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5537 		txmask = htole32(sc->txchainmask);
5538 		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5539 		    sizeof txmask, 0);
5540 		if (error != 0) {
5541 			dev_err(sc->sc_dip, CE_WARN,
5542 			    "!could not configure valid TX chains");
5543 			return error;
5544 		}
5545 	}
5546 
5547 	/* Set mode, channel, RX filter and enable RX. */
5548 	memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5549 	IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_macaddr);
5550 	IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_macaddr);
5551 	sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
5552 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5553 	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan))
5554 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5555 	switch (ic->ic_opmode) {
5556 	case IEEE80211_M_IBSS:
5557 		sc->rxon.mode = IWN_MODE_IBSS;
5558 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5559 		break;
5560 	case IEEE80211_M_STA:
5561 		sc->rxon.mode = IWN_MODE_STA;
5562 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5563 		break;
5564 	case IEEE80211_M_MONITOR:
5565 		sc->rxon.mode = IWN_MODE_MONITOR;
5566 		sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5567 		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5568 		break;
5569 	default:
5570 		/* Should not get here. */
5571 		ASSERT(ic->ic_opmode == IEEE80211_M_IBSS ||
5572 		    ic->ic_opmode == IEEE80211_M_STA ||
5573 		    ic->ic_opmode == IEEE80211_M_MONITOR);
5574 		break;
5575 	}
5576 	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
5577 	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */
5578 	sc->rxon.ht_single_mask = 0xff;
5579 	sc->rxon.ht_dual_mask = 0xff;
5580 	sc->rxon.ht_triple_mask = 0xff;
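	/* Advertise all valid RX chains; use 2 chains for MIMO and 2 when idle. */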
5581 	rxchain =
5582 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5583 	    IWN_RXCHAIN_MIMO_COUNT(2) |
5584 	    IWN_RXCHAIN_IDLE_COUNT(2);
5585 	sc->rxon.rxchain = htole16(rxchain);
5586 	DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5587 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5588 	if (error != 0) {
5589 		dev_err(sc->sc_dip, CE_WARN,
5590 		    "!RXON command failed");
5591 		return error;
5592 	}
5593 
5594 	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5595 		dev_err(sc->sc_dip, CE_WARN,
5596 		    "!could not add broadcast node");
5597 		return error;
5598 	}
5599 
5600 	/* Configuration has changed, set TX power accordingly. */
5601 	if ((error = ops->set_txpower(sc, 0)) != 0) {
5602 		dev_err(sc->sc_dip, CE_WARN,
5603 		    "!could not set TX power");
5604 		return error;
5605 	}
5606 
5607 	if ((error = iwn_set_critical_temp(sc)) != 0) {
5608 		dev_err(sc->sc_dip, CE_WARN,
5609 		    "!could not set critical temperature");
5610 		return error;
5611 	}
5612 
5613 	/* Set power saving level to CAM during initialization. */
5614 	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5615 		dev_err(sc->sc_dip, CE_WARN,
5616 		    "!could not set power saving level");
5617 		return error;
5618 	}
5619 	return 0;
5620 }
5621 
5622 static uint16_t
5623 iwn_get_active_dwell_time(struct iwn_softc *sc, uint16_t flags,
5624     uint8_t n_probes)
5625 {
5626 	_NOTE(ARGUNUSED(sc));
5627 
5628 	/* 2GHz dwell time */
5629 	if (flags & IEEE80211_CHAN_2GHZ)
5630 		return IWN_ACTIVE_DWELL_TIME_2GHZ +
5631 		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1);
5632 
5633 	/* 5GHz dwell time */
5634 	return IWN_ACTIVE_DWELL_TIME_5GHZ +
5635 	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1);
5636 }
5637 
5638 /*
5639  * Limit the total dwell time to 85% of the beacon interval.
5640  *
5641  * Returns the dwell time in milliseconds.
5642  */
5643 static uint16_t
5644 iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
5645 {
5646 	_NOTE(ARGUNUSED(dwell_time));
5647 
5648 	struct ieee80211com *ic = &sc->sc_ic;
5649 	struct ieee80211_node *ni = ic->ic_bss;
5650 	int bintval = 0;
5651 
5652 	/* bintval is in TU (1.024mS) */
5653 	if (ni != NULL)
5654 		bintval = ni->in_intval;
5655 
5656 	/*
5657 	 * If it's non-zero, we should calculate the minimum of
5658 	 * it and the DWELL_BASE.
5659 	 *
5660 	 * XXX Yes, the math should take into account that bintval
5661 	 * is 1.024mS, not 1mS..
5662 	 */
5663 	if (bintval > 0)
5664 		return MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100));
5665 
5666 	/* No association context? Default */
5667 	return IWN_PASSIVE_DWELL_BASE;
5668 }
5669 
5670 static uint16_t
5671 iwn_get_passive_dwell_time(struct iwn_softc *sc, uint16_t flags)
5672 {
5673 	uint16_t passive;
5674 	if (flags & IEEE80211_CHAN_2GHZ)
5675 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
5676 	else
5677 		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
5678 
5679 	/* Clamp to the beacon interval if we're associated */
5680 	return iwn_limit_dwell(sc, passive);
5681 }
5682 
5683 static int
5684 iwn_scan(struct iwn_softc *sc, uint16_t flags)
5685 {
5686 	struct ieee80211com *ic = &sc->sc_ic;
5687 	struct iwn_scan_hdr *hdr;
5688 	struct iwn_cmd_data *tx;
5689 	struct iwn_scan_essid *essid;
5690 	struct iwn_scan_chan *chan;
5691 	struct ieee80211_frame *wh;
5692 	struct ieee80211_rateset *rs;
5693 	struct ieee80211_channel *c;
5694 	uint8_t *buf, *frm;
5695 	uint16_t rxchain, dwell_active, dwell_passive;
5696 	uint8_t txant;
5697 	int buflen, error, is_active;
5698 
5699 	buf = kmem_zalloc(IWN_SCAN_MAXSZ, KM_NOSLEEP);
5700 	if (buf == NULL) {
5701 		dev_err(sc->sc_dip, CE_WARN,
5702 		    "!could not allocate buffer for scan command");
5703 		return ENOMEM;
5704 	}
5705 	hdr = (struct iwn_scan_hdr *)buf;
5706 	/*
5707 	 * Move to the next channel if no frames are received within 20ms
5708 	 * after sending the probe request.
5709 	 */
5710 	hdr->quiet_time = htole16(20);		/* timeout in milliseconds */
5711 	hdr->quiet_threshold = htole16(1);	/* min # of packets */
5712 
5713 	/* Select antennas for scanning. */
5714 	rxchain =
5715 	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
5716 	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5717 	    IWN_RXCHAIN_DRIVER_FORCE;
5718 	if ((flags & IEEE80211_CHAN_5GHZ) &&
5719 	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
5720 		/* Ant A must be avoided in 5GHz because of an HW bug. */
5721 		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
5722 	} else	/* Use all available RX antennas. */
5723 		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5724 	hdr->rxchain = htole16(rxchain);
5725 	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5726 
5727 	tx = (struct iwn_cmd_data *)(hdr + 1);
5728 	tx->flags = htole32(IWN_TX_AUTO_SEQ);
5729 	tx->id = sc->broadcast_id;
5730 	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5731 
5732 	if (flags & IEEE80211_CHAN_5GHZ) {
5733 		/* Send probe requests at 6Mbps. */
5734 		tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
5735 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5736 	} else {
5737 		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5738 		/* Send probe requests at 1Mbps. */
5739 		tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
5740 		tx->rflags = IWN_RFLAG_CCK;
5741 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5742 	}
5743 
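	/* Default CRC threshold; overridden below once the scan type is known. */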
5744 	hdr->crc_threshold = 0xffff;
5745 
5746 	/* Use the first valid TX antenna. */
5747 	txant = IWN_LSB(sc->txchainmask);
5748 	tx->rflags |= IWN_RFLAG_ANT(txant);
5749 
5750 	/*
5751 	 * Only do active scanning if we're announcing a probe request
5752 	 * for a given SSID (or more, if we ever add it to the driver.)
5753 	 */
5754 	is_active = 0;
5755 
5756 	essid = (struct iwn_scan_essid *)(tx + 1);
5757 	if (ic->ic_des_esslen != 0) {
5758 		char essidstr[IEEE80211_NWID_LEN+1];
5759 		memcpy(essidstr, ic->ic_des_essid, ic->ic_des_esslen);
5760 		essidstr[ic->ic_des_esslen] = '\0';
5761 
5762 		DTRACE_PROBE1(scan__direct, char *, essidstr);
5763 
5764 		essid[0].id = IEEE80211_ELEMID_SSID;
5765 		essid[0].len = ic->ic_des_esslen;
5766 		memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
5767 
5768 		is_active = 1;
5769 		/* hdr->crc_threshold = 0x1; */
5770 		hdr->scan_flags = htole32(IWN_SCAN_PASSIVE2ACTIVE);
5771 	}
5772 	/*
5773 	 * Build a probe request frame.  Most of the following code is a
5774 	 * copy & paste of what is done in net80211.
5775 	 */
5776 	wh = (struct ieee80211_frame *)(essid + 20);
5777 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5778 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5779 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5780 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5781 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
5782 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5783 	wh->i_dur[0] = wh->i_dur[1] = 0; /* filled by HW */
5784 	wh->i_seq[0] = wh->i_seq[1] = 0; /* filled by HW */
5785 
5786 	frm = (uint8_t *)(wh + 1);
5787 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5788 	frm = ieee80211_add_rates(frm, rs);
5789 #ifndef IEEE80211_NO_HT
5790 	if (ic->ic_flags & IEEE80211_F_HTON)
5791 		frm = ieee80211_add_htcaps(frm, ic);
5792 #endif
5793 	if (rs->ir_nrates > IEEE80211_RATE_SIZE)
5794 		frm = ieee80211_add_xrates(frm, rs);
5795 
5796 	/* Set length of probe request. */
5797 	/*LINTED: E_PTRDIFF_OVERFLOW*/
5798 	tx->len = htole16(frm - (uint8_t *)wh);
5799 
5800 
5801 	/*
5802 	 * If active scanning is requested but a certain channel is
5803 	 * marked passive, we can do active scanning if we detect
5804 	 * transmissions.
5805 	 *
5806 	 * There is an issue with some firmware versions that triggers
5807 	 * a sysassert on a "good CRC threshold" of zero (== disabled),
5808 	 * on a radar channel even though this means that we should NOT
5809 	 * send probes.
5810 	 *
5811 	 * The "good CRC threshold" is the number of frames that we
5812 	 * need to receive during our dwell time on a channel before
5813 	 * sending out probes -- setting this to a huge value will
5814 	 * mean we never reach it, but at the same time work around
5815 	 * the aforementioned issue. Thus use IWN_GOOD_CRC_TH_NEVER
5816 	 * here instead of IWN_GOOD_CRC_TH_DISABLED.
5817 	 *
5818 	 * This was fixed in later versions along with some other
5819 	 * scan changes, and the threshold behaves as a flag in those
5820 	 * versions.
5821 	 */
5822 
5823 	/*
5824 	 * If we're doing active scanning, set the crc_threshold
5825 	 * to a suitable value.  This differs for active versus
5826 	 * passive scanning depending upon the channel flags; the
5827 	 * firmware will obey that particular check for us.
5828 	 */
5829 	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
5830 		hdr->crc_threshold = is_active ?
5831 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
5832 	else
5833 		hdr->crc_threshold = is_active ?
5834 		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
5835 
5836 	chan = (struct iwn_scan_chan *)frm;
5837 	for (c  = &ic->ic_sup_channels[1];
5838 	     c <= &ic->ic_sup_channels[IEEE80211_CHAN_MAX]; c++) {
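		/* Skip channels that do not match the requested band flags. */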
5839 		if ((c->ich_flags & flags) != flags)
5840 			continue;
5841 		chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5842 		chan->flags = 0;
5843 		if (!(c->ich_flags & IEEE80211_CHAN_PASSIVE))
5844 			chan->flags |= htole32(IWN_CHAN_ACTIVE);
5845 		if (ic->ic_des_esslen != 0)
5846 			chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5847 
5848 		/*
5849 		 * Calculate the active/passive dwell times.
5850 		 */
5851 
5852 		dwell_active = iwn_get_active_dwell_time(sc, flags, is_active);
5853 		dwell_passive = iwn_get_passive_dwell_time(sc, flags);
5854 
5855 		/* Make sure they're valid */
5856 		if (dwell_passive <= dwell_active)
5857 			dwell_passive = dwell_active + 1;
5858 
5859 		chan->active = htole16(dwell_active);
5860 		chan->passive = htole16(dwell_passive);
5861 
5862 		chan->dsp_gain = 0x6e;
5863 		if (IEEE80211_IS_CHAN_5GHZ(c)) {
5864 			chan->rf_gain = 0x3b;
5865 		} else {
5866 			chan->rf_gain = 0x28;
5867 		}
5868 		DTRACE_PROBE5(add__channel, uint8_t, chan->chan,
5869 		    uint32_t, chan->flags, uint8_t, chan->rf_gain,
5870 		    uint16_t, chan->active, uint16_t, chan->passive);
5871 		hdr->nchan++;
5872 		chan++;
5873 	}
5874 
5875 	/*LINTED: E_PTRDIFF_OVERFLOW*/
5876 	buflen = (uint8_t *)chan - buf;
5877 	hdr->len = htole16(buflen);
5878 
5879 	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5880 	kmem_free(buf, IWN_SCAN_MAXSZ);
5881 	return error;
5882 }
5883 
5884 static int
5885 iwn_auth(struct iwn_softc *sc)
5886 {
5887 	struct iwn_ops *ops = &sc->ops;
5888 	struct ieee80211com *ic = &sc->sc_ic;
5889 	struct ieee80211_node *ni = ic->ic_bss;
5890 	int error;
5891 
5892 	ASSERT(ni->in_chan != NULL);
5893 
5894 	/* Update adapter configuration. */
5895 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->in_bssid);
5896 	sc->rxon.chan = ieee80211_chan2ieee(ic, ni->in_chan);
5897 	sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5898 	if ((ni->in_chan != IEEE80211_CHAN_ANYC) &&
5899 	    IEEE80211_IS_CHAN_2GHZ(ni->in_chan))
5900 		sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5901 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
5902 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5903 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5904 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5905 	switch (ic->ic_curmode) {
5906 	case IEEE80211_MODE_11A:
5907 		sc->rxon.cck_mask  = 0;
5908 		sc->rxon.ofdm_mask = 0x15;
5909 		break;
5910 	case IEEE80211_MODE_11B:
5911 		sc->rxon.cck_mask  = 0x03;
5912 		sc->rxon.ofdm_mask = 0;
5913 		break;
5914 	default:	/* Assume 802.11b/g. */
5915 		sc->rxon.cck_mask  = 0x0f;
5916 		sc->rxon.ofdm_mask = 0x15;
5917 	}
5918 	DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
5919 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5920 	if (error != 0) {
5921 		dev_err(sc->sc_dip, CE_WARN,
5922 		    "!RXON command failed");
5923 		return error;
5924 	}
5925 
5926 	/* Configuration has changed, set TX power accordingly. */
5927 	if ((error = ops->set_txpower(sc, 1)) != 0) {
5928 		dev_err(sc->sc_dip, CE_WARN,
5929 		    "!could not set TX power");
5930 		return error;
5931 	}
5932 	/*
5933 	 * Reconfiguring RXON clears the firmware nodes table so we must
5934 	 * add the broadcast node again.
5935 	 */
5936 	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5937 		dev_err(sc->sc_dip, CE_WARN,
5938 		    "!could not add broadcast node");
5939 		return error;
5940 	}
5941 	return 0;
5942 }
5943 
5944 static int
5945 iwn_fast_recover(struct iwn_softc *sc)
5946 {
5947 	int err = IWN_FAIL;
5948 
5949 	mutex_enter(&sc->sc_mtx);
5950 
5951 	/* restore runtime configuration */
5952 	bcopy(&sc->rxon_save, &sc->rxon,
5953 	    sizeof (sc->rxon));
5954 
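	/* Clear association state so the RXON sent by iwn_auth() is unassociated. */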
5955 	sc->rxon.associd = 0;
5956 	sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
5957 
5958 	if ((err = iwn_auth(sc)) != IWN_SUCCESS) {
5959 		dev_err(sc->sc_dip, CE_WARN, "!iwn_fast_recover(): "
5960 		    "could not setup authentication");
5961 		mutex_exit(&sc->sc_mtx);
5962 		return (err);
5963 	}
5964 
5965 	bcopy(&sc->rxon_save, &sc->rxon, sizeof (sc->rxon));
5966 
5967 	/* update adapter's configuration */
5968 	err = iwn_run(sc);
5969 	if (err != IWN_SUCCESS) {
5970 		dev_err(sc->sc_dip, CE_WARN, "!iwn_fast_recover(): "
5971 		    "failed to setup association");
5972 		mutex_exit(&sc->sc_mtx);
5973 		return (err);
5974 	}
5975 	/* set LED on */
5976 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5977 
5978 	sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
5979 	mutex_exit(&sc->sc_mtx);
5980 
5981 	/* start queue */
5982 	DTRACE_PROBE(resume__xmit);
5983 
5984 	return (IWN_SUCCESS);
5985 }
5986 
5987 static int
5988 iwn_run(struct iwn_softc *sc)
5989 {
5990 	struct iwn_ops *ops = &sc->ops;
5991 	struct ieee80211com *ic = &sc->sc_ic;
5992 	struct ieee80211_node *ni = ic->ic_bss;
5993 	struct iwn_node_info node;
5994 	int error;
5995 
5996 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5997 		/* Link LED blinks while monitoring. */
5998 		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5999 		return 0;
6000 	}
6001 	if ((error = iwn_set_timing(sc, ni)) != 0) {
6002 		dev_err(sc->sc_dip, CE_WARN,
6003 		    "!could not set timing");
6004 		return error;
6005 	}
6006 
6007 	/* Update adapter configuration. */
6008 	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->in_bssid);
6009 	sc->rxon.associd = htole16(IEEE80211_AID(ni->in_associd));
6010 	/* Short preamble and slot time are negotiated when associating. */
6011 	sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
6012 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6013 		sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
6014 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6015 		sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
6016 	sc->rxon.filter |= htole32(IWN_FILTER_BSS);
6017 	if (ic->ic_opmode != IEEE80211_M_STA &&
6018 	    ic->ic_opmode != IEEE80211_M_IBSS)
6019 		sc->rxon.filter |= htole32(IWN_FILTER_BEACON);
6020 	DTRACE_PROBE2(rxon, struct iwn_rxon *, &sc->rxon, int, sc->rxonsz);
6021 	error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
6022 	if (error != 0) {
6023 		dev_err(sc->sc_dip, CE_WARN,
6024 		    "!could not update configuration");
6025 		return error;
6026 	}
6027 
6028 	/* Configuration has changed, set TX power accordingly. */
6029 	if ((error = ops->set_txpower(sc, 1)) != 0) {
6030 		dev_err(sc->sc_dip, CE_WARN,
6031 		    "!could not set TX power");
6032 		return error;
6033 	}
6034 
6035 	/* Fake a join to initialize the TX rate. */
6036 	((struct iwn_node *)ni)->id = IWN_ID_BSS;
6037 	iwn_newassoc(ni, 1);
6038 
6039 	/* Add BSS node. */
6040 	memset(&node, 0, sizeof node);
6041 	IEEE80211_ADDR_COPY(node.macaddr, ni->in_macaddr);
6042 	node.id = IWN_ID_BSS;
6043 #ifdef notyet
6044 	node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
6045 	    IWN_AMDPU_DENSITY(5));	/* 2us */
6046 #endif
6047 	error = ops->add_node(sc, &node, 1);
6048 	if (error != 0) {
6049 		dev_err(sc->sc_dip, CE_WARN,
6050 		    "!could not add BSS node");
6051 		return error;
6052 	}
6053 	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
6054 		dev_err(sc->sc_dip, CE_WARN,
6055 		    "!could not setup link quality for node %d", node.id);
6056 		return error;
6057 	}
6058 
6059 	if ((error = iwn_init_sensitivity(sc)) != 0) {
6060 		dev_err(sc->sc_dip, CE_WARN,
6061 		    "!could not set sensitivity");
6062 		return error;
6063 	}
6064 
6065 	if ((error = iwn_qosparam_to_hw(sc, 1)) != 0) {
6066 		dev_err(sc->sc_dip, CE_WARN,
6067 		    "!could not set QoS params");
6068 		return (error);
6069 	}
6070 
6071 	/* Start periodic calibration timer. */
6072 	sc->sc_flags &= ~IWN_FLAG_STOP_CALIB_TO;
6073 	sc->calib.state = IWN_CALIB_STATE_ASSOC;
6074 	sc->calib_cnt = 0;
6075 	sc->calib_to = timeout(iwn_calib_timeout, sc, drv_usectohz(500000));
6076 
6077 	/* Link LED always on while associated. */
6078 	iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6079 	return 0;
6080 }
6081 
6082 #ifdef IWN_HWCRYPTO
6083 /*
6084  * We support CCMP hardware encryption/decryption of unicast frames only.
6085  * HW support for TKIP really sucks.  We should let TKIP die anyway.
6086  */
6087 static int
6088 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6089     struct ieee80211_key *k)
6090 {
6091 	struct iwn_softc *sc = ic->ic_softc;
6092 	struct iwn_ops *ops = &sc->ops;
6093 	struct iwn_node *wn = (void *)ni;
6094 	struct iwn_node_info node;
6095 	uint16_t kflags;
6096 
6097 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6098 	    k->k_cipher != IEEE80211_CIPHER_CCMP)
6099 		return ieee80211_set_key(ic, ni, k);
6100 
6101 	kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
6102 	if (k->k_flags & IEEE80211_KEY_GROUP)
6103 		kflags |= IWN_KFLAG_GROUP;
6104 
6105 	memset(&node, 0, sizeof node);
6106 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
6107 	    sc->broadcast_id : wn->id;
6108 	node.control = IWN_NODE_UPDATE;
6109 	node.flags = IWN_FLAG_SET_KEY;
6110 	node.kflags = htole16(kflags);
6111 	node.kid = k->k_id;
6112 	memcpy(node.key, k->k_key, k->k_len);
6113 	DTRACE_PROBE2(set__key, int, k->k_id, int, node.id);
6114 	return ops->add_node(sc, &node, 1);
6115 }
6116 
6117 static void
6118 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
6119     struct ieee80211_key *k)
6120 {
6121 	struct iwn_softc *sc = ic->ic_softc;
6122 	struct iwn_ops *ops = &sc->ops;
6123 	struct iwn_node *wn = (void *)ni;
6124 	struct iwn_node_info node;
6125 
6126 	if ((k->k_flags & IEEE80211_KEY_GROUP) ||
6127 	    k->k_cipher != IEEE80211_CIPHER_CCMP) {
6128 		/* See comment about other ciphers above. */
6129 		ieee80211_delete_key(ic, ni, k);
6130 		return;
6131 	}
6132 	if (ic->ic_state != IEEE80211_S_RUN)
6133 		return;	/* Nothing to do. */
6134 	memset(&node, 0, sizeof node);
6135 	node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
6136 	    sc->broadcast_id : wn->id;
6137 	node.control = IWN_NODE_UPDATE;
6138 	node.flags = IWN_FLAG_SET_KEY;
6139 	node.kflags = htole16(IWN_KFLAG_INVALID);
6140 	node.kid = 0xff;
6141 	DTRACE_PROBE1(del__key, int, node.id);
6142 	(void)ops->add_node(sc, &node, 1);
6143 }
6144 #endif
6145 
6146 #ifndef IEEE80211_NO_HT
6147 /*
6148  * This function is called by upper layer when an ADDBA request is received
6149  * from another STA and before the ADDBA response is sent.
6150  */
6151 static int
6152 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6153     uint8_t tid)
6154 {
6155 	struct ieee80211_rx_ba *ba = &ni->in_rx_ba[tid];
6156 	struct iwn_softc *sc = ic->ic_softc;
6157 	struct iwn_ops *ops = &sc->ops;
6158 	struct iwn_node *wn = (void *)ni;
6159 	struct iwn_node_info node;
6160 
6161 	memset(&node, 0, sizeof node);
6162 	node.id = wn->id;
6163 	node.control = IWN_NODE_UPDATE;
6164 	node.flags = IWN_FLAG_SET_ADDBA;
6165 	node.addba_tid = tid;
6166 	node.addba_ssn = htole16(ba->ba_winstart);
6167 	DTRACE_PROBE3(addba, uint8_t, wn->id, uint8_t, tid, int, ba->ba_winstart);
6168 	return ops->add_node(sc, &node, 1);
6169 }
6170 
6171 /*
6172  * This function is called by upper layer on teardown of an HT-immediate
6173  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
6174  */
6175 static void
6176 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
6177     uint8_t tid)
6178 {
6179 	struct iwn_softc *sc = ic->ic_softc;
6180 	struct iwn_ops *ops = &sc->ops;
6181 	struct iwn_node *wn = (void *)ni;
6182 	struct iwn_node_info node;
6183 
6184 	memset(&node, 0, sizeof node);
6185 	node.id = wn->id;
6186 	node.control = IWN_NODE_UPDATE;
6187 	node.flags = IWN_FLAG_SET_DELBA;
6188 	node.delba_tid = tid;
6189 	DTRACE_PROBE2(delba, uint8_t, wn->id, uint8_t, tid);
6190 	(void)ops->add_node(sc, &node, 1);
6191 }
6192 
6193 /*
6194  * This function is called by upper layer when an ADDBA response is received
6195  * from another STA.
6196  */
6197 static int
6198 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
6199     uint8_t tid)
6200 {
6201 	struct ieee80211_tx_ba *ba = &ni->in_tx_ba[tid];
6202 	struct iwn_softc *sc = ic->ic_softc;
6203 	struct iwn_ops *ops = &sc->ops;
6204 	struct iwn_node *wn = (void *)ni;
6205 	struct iwn_node_info node;
6206 	int error;
6207 
6208 	/* Enable TX for the specified RA/TID. */
6209 	wn->disable_tid &= ~(1 << tid);
6210 	memset(&node, 0, sizeof node);
6211 	node.id = wn->id;
6212 	node.control = IWN_NODE_UPDATE;
6213 	node.flags = IWN_FLAG_SET_DISABLE_TID;
6214 	node.disable_tid = htole16(wn->disable_tid);
6215 	error = ops->add_node(sc, &node, 1);
6216 	if (error != 0)
6217 		return error;
6218 
6219 	if ((error = iwn_nic_lock(sc)) != 0)
6220 		return error;
6221 	ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
6222 	iwn_nic_unlock(sc);
6223 	return 0;
6224 }
6225 
6226 static void
6227 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
6228     uint8_t tid)
6229 {
6230 	struct ieee80211_tx_ba *ba = &ni->in_tx_ba[tid];
6231 	struct iwn_softc *sc = ic->ic_softc;
6232 	struct iwn_ops *ops = &sc->ops;
6233 
6234 	if (iwn_nic_lock(sc) != 0)
6235 		return;
6236 	ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
6237 	iwn_nic_unlock(sc);
6238 }
6239 
6240 static void
6241 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6242     uint8_t tid, uint16_t ssn)
6243 {
6244 	struct iwn_node *wn = (void *)ni;
6245 	int qid = 7 + tid;
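	/* TX aggregation uses queues 7-14, one per TID (after the EDCA/cmd/HCCA rings). */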
6246 
6247 	/* Stop TX scheduler while we're changing its configuration. */
6248 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6249 	    IWN4965_TXQ_STATUS_CHGACT);
6250 
6251 	/* Assign RA/TID translation to the queue. */
6252 	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
6253 	    wn->id << 4 | tid);
6254 
6255 	/* Enable chain-building mode for the queue. */
6256 	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
6257 
6258 	/* Set starting sequence number from the ADDBA request. */
6259 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6260 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6261 
6262 	/* Set scheduler window size. */
6263 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
6264 	    IWN_SCHED_WINSZ);
6265 	/* Set scheduler frame limit. */
6266 	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6267 	    IWN_SCHED_LIMIT << 16);
6268 
6269 	/* Enable interrupts for the queue. */
6270 	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6271 
6272 	/* Mark the queue as active. */
6273 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6274 	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
6275 	    iwn_tid2fifo[tid] << 1);
6276 }
6277 
6278 static void
6279 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6280 {
6281 	int qid = 7 + tid;
6282 
6283 	/* Stop TX scheduler while we're changing its configuration. */
6284 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6285 	    IWN4965_TXQ_STATUS_CHGACT);
6286 
6287 	/* Set starting sequence number from the ADDBA request. */
6288 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6289 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
6290 
6291 	/* Disable interrupts for the queue. */
6292 	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
6293 
6294 	/* Mark the queue as inactive. */
6295 	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6296 	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
6297 }
6298 
6299 static void
6300 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
6301     uint8_t tid, uint16_t ssn)
6302 {
6303 	struct iwn_node *wn = (void *)ni;
6304 	int qid = 10 + tid;
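	/* On 5000-series adapters, TX aggregation uses queues 10-17, one per TID. */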
6305 
6306 	/* Stop TX scheduler while we're changing its configuration. */
6307 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6308 	    IWN5000_TXQ_STATUS_CHGACT);
6309 
6310 	/* Assign RA/TID translation to the queue. */
6311 	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
6312 	    wn->id << 4 | tid);
6313 
6314 	/* Enable chain-building mode for the queue. */
6315 	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
6316 
6317 	/* Enable aggregation for the queue. */
6318 	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6319 
6320 	/* Set starting sequence number from the ADDBA request. */
6321 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6322 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6323 
6324 	/* Set scheduler window size and frame limit. */
6325 	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6326 	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6327 
6328 	/* Enable interrupts for the queue. */
6329 	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6330 
6331 	/* Mark the queue as active. */
6332 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6333 	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
6334 }
6335 
6336 static void
6337 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
6338 {
6339 	int qid = 10 + tid;
6340 
6341 	/* Stop TX scheduler while we're changing its configuration. */
6342 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6343 	    IWN5000_TXQ_STATUS_CHGACT);
6344 
6345 	/* Disable aggregation for the queue. */
6346 	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
6347 
6348 	/* Set starting sequence number from the ADDBA request. */
6349 	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
6350 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
6351 
6352 	/* Disable interrupts for the queue. */
6353 	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
6354 
6355 	/* Mark the queue as inactive. */
6356 	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6357 	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
6358 }
6359 #endif	/* !IEEE80211_NO_HT */
6360 
6361 /*
6362  * Query calibration tables from the initialization firmware.  We do this
6363  * only once at first boot.  Called from a process context.
6364  */
6365 static int
6366 iwn5000_query_calibration(struct iwn_softc *sc)
6367 {
6368 	struct iwn5000_calib_config cmd;
6369 	int error;
6370 	clock_t clk;
6371 
6372 	ASSERT(mutex_owned(&sc->sc_mtx));
6373 
6374 	memset(&cmd, 0, sizeof cmd);
6375 	cmd.ucode.once.enable = 0xffffffff;
6376 	cmd.ucode.once.start  = 0xffffffff;
6377 	cmd.ucode.once.send   = 0xffffffff;
6378 	cmd.ucode.flags       = 0xffffffff;
6379 	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
6380 	if (error != 0)
6381 		return error;
6382 
6383 	/* Wait at most two seconds for calibration to complete. */
6384 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
6385 	while (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
6386 		if (cv_timedwait(&sc->sc_calib_cv, &sc->sc_mtx, clk) < 0)
6387 			return (IWN_FAIL);
6388 
6389 	return (IWN_SUCCESS);
6390 }
6391 
6392 /*
6393  * Send calibration results to the runtime firmware.  These results were
6394  * obtained on first boot from the initialization firmware.
6395  */
6396 static int
6397 iwn5000_send_calibration(struct iwn_softc *sc)
6398 {
6399 	int idx, error;
6400 
6401 	for (idx = 0; idx < 5; idx++) {
6402 		if (sc->calibcmd[idx].buf == NULL)
6403 			continue;	/* No results available. */
6404 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
6405 		    sc->calibcmd[idx].len, 0);
6406 		if (error != 0) {
6407 			dev_err(sc->sc_dip, CE_WARN,
6408 			    "!could not send calibration result");
6409 			return error;
6410 		}
6411 	}
6412 	return 0;
6413 }
6414 
6415 static int
6416 iwn5000_send_wimax_coex(struct iwn_softc *sc)
6417 {
6418 	struct iwn5000_wimax_coex wimax;
6419 
6420 #ifdef notyet
6421 	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6422 		/* Enable WiMAX coexistence for combo adapters. */
6423 		wimax.flags =
6424 		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
6425 		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
6426 		    IWN_WIMAX_COEX_STA_TABLE_VALID |
6427 		    IWN_WIMAX_COEX_ENABLE;
6428 		memcpy(wimax.events, iwn6050_wimax_events,
6429 		    sizeof iwn6050_wimax_events);
6430 	} else
6431 #endif
6432 	{
6433 		/* Disable WiMAX coexistence. */
6434 		wimax.flags = 0;
6435 		memset(wimax.events, 0, sizeof wimax.events);
6436 	}
6437 	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6438 }
6439 
6440 static int
6441 iwn6000_temp_offset_calib(struct iwn_softc *sc)
6442 {
6443 	struct iwn6000_phy_calib_temp_offset cmd;
6444 
6445 	memset(&cmd, 0, sizeof cmd);
6446 	cmd.code = IWN6000_PHY_CALIB_TEMP_OFFSET;
6447 	cmd.ngroups = 1;
6448 	cmd.isvalid = 1;
6449 	if (sc->eeprom_temp != 0)
6450 		cmd.offset = htole16(sc->eeprom_temp);
6451 	else
6452 		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6453 	sc->sc_toff.t6000->toff.value.l = le16toh(cmd.offset);
6454 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6455 }
6456 
6457 static int
6458 iwn2000_temp_offset_calib(struct iwn_softc *sc)
6459 {
6460 	struct iwn2000_phy_calib_temp_offset cmd;
6461 
6462 	memset(&cmd, 0, sizeof cmd);
6463 	cmd.code = IWN2000_PHY_CALIB_TEMP_OFFSET;
6464 	cmd.ngroups = 1;
6465 	cmd.isvalid = 1;
6466 	if (sc->eeprom_rawtemp != 0) {
6467 		cmd.offset_low = htole16(sc->eeprom_rawtemp);
6468 		cmd.offset_high = htole16(sc->eeprom_temp);
6469 	} else {
6470 		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
6471 		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
6472 	}
6473 	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
6474 	sc->sc_toff.t2000->toff_lo.value.l = le16toh(cmd.offset_low);
6475 	sc->sc_toff.t2000->toff_hi.value.l = le16toh(cmd.offset_high);
6476 	sc->sc_toff.t2000->volt.value.l = le16toh(cmd.burnt_voltage_ref);
6477 
6478 	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6479 }
6480 
6481 /*
6482  * This function is called after the runtime firmware notifies us of its
6483  * readiness (called in a process context).
6484  */
6485 static int
6486 iwn4965_post_alive(struct iwn_softc *sc)
6487 {
6488 	int error, qid;
6489 
6490 	if ((error = iwn_nic_lock(sc)) != 0)
6491 		return error;
6492 
6493 	/* Clear TX scheduler state in SRAM. */
6494 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6495 	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6496 	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6497 
6498 	/* Set physical address of TX scheduler rings (1KB aligned). */
6499 	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6500 
6501 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6502 
6503 	/* Disable chain mode for all our 16 queues. */
6504 	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6505 
6506 	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6507 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6508 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6509 
6510 		/* Set scheduler window size. */
6511 		iwn_mem_write(sc, sc->sched_base +
6512 		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6513 		/* Set scheduler frame limit. */
6514 		iwn_mem_write(sc, sc->sched_base +
6515 		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6516 		    IWN_SCHED_LIMIT << 16);
6517 	}
6518 
6519 	/* Enable interrupts for all our 16 queues. */
6520 	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6521 	/* Identify TX FIFO rings (0-7). */
6522 	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6523 
6524 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6525 	for (qid = 0; qid < 7; qid++) {
6526 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6527 		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6528 		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6529 	}
6530 	iwn_nic_unlock(sc);
6531 	return 0;
6532 }
6533 
6534 /*
6535  * This function is called after the initialization or runtime firmware
6536  * notifies us of its readiness (called in a process context).
6537  */
6538 static int
6539 iwn5000_post_alive(struct iwn_softc *sc)
6540 {
6541 	int error, qid;
6542 
6543 	/* Switch to using ICT interrupt mode. */
6544 	iwn5000_ict_reset(sc);
6545 
6546 	if ((error = iwn_nic_lock(sc)) != 0)
6547 		return error;
6548 
6549 	/* Clear TX scheduler state in SRAM. */
6550 	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6551 	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6552 	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6553 
6554 	/* Set physical address of TX scheduler rings (1KB aligned). */
6555 	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6556 
6557 	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6558 
6559 	/* Enable chain mode for all queues, except command queue. */
6560 	iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6561 	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6562 
6563 	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6564 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6565 		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6566 
6567 		iwn_mem_write(sc, sc->sched_base +
6568 		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6569 		/* Set scheduler window size and frame limit. */
6570 		iwn_mem_write(sc, sc->sched_base +
6571 		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6572 		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6573 	}
6574 
6575 	/* Enable interrupts for all our 20 queues. */
6576 	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6577 	/* Identify TX FIFO rings (0-7). */
6578 	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6579 
6580 	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6581 	for (qid = 0; qid < 7; qid++) {
6582 		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6583 		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6584 		    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6585 	}
6586 	iwn_nic_unlock(sc);
6587 
6588 	/* Configure WiMAX coexistence for combo adapters. */
6589 	error = iwn5000_send_wimax_coex(sc);
6590 	if (error != 0) {
6591 		dev_err(sc->sc_dip, CE_WARN,
6592 		    "!could not configure WiMAX coexistence");
6593 		return error;
6594 	}
6595 	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6596 		struct iwn5000_phy_calib_crystal cmd;
6597 
6598 		/* Perform crystal calibration. */
6599 		memset(&cmd, 0, sizeof cmd);
6600 		cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6601 		cmd.ngroups = 1;
6602 		cmd.isvalid = 1;
6603 		cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
6604 		cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
6605 		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6606 		if (error != 0) {
6607 			dev_err(sc->sc_dip, CE_WARN,
6608 			    "!crystal calibration failed");
6609 			return error;
6610 		}
6611 	}
6612 	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6613 		/* Query calibration from the initialization firmware. */
6614 		if ((error = iwn5000_query_calibration(sc)) != 0) {
6615 			dev_err(sc->sc_dip, CE_WARN,
6616 			    "!could not query calibration");
6617 			return error;
6618 		}
6619 		/*
6620 		 * We have the calibration results now, reboot with the
6621 		 * runtime firmware (call ourselves recursively!)
6622 		 */
6623 		iwn_hw_stop(sc, B_FALSE);
6624 		error = iwn_hw_init(sc);
6625 	} else {
6626 		/* Send calibration results to runtime firmware. */
6627 		error = iwn5000_send_calibration(sc);
6628 	}
6629 	return error;
6630 }
6631 
6632 /*
6633  * The firmware boot code is small and is intended to be copied directly into
6634  * the NIC internal memory (no DMA transfer).
6635  */
6636 static int
6637 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6638 {
6639 	int error, ntries;
6640 
6641 	size /= sizeof (uint32_t);
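	/* The BSM copies the boot code in 32-bit words; convert the size to words. */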
6642 
6643 	if ((error = iwn_nic_lock(sc)) != 0)
6644 		return error;
6645 
6646 	/* Copy microcode image into NIC memory. */
6647 	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6648 	    /*LINTED: E_PTR_BAD_CAST_ALIGN*/
6649 	    (const uint32_t *)ucode, size);
6650 
6651 	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6652 	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6653 	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6654 
6655 	/* Start boot load now. */
6656 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6657 
6658 	/* Wait for transfer to complete. */
6659 	for (ntries = 0; ntries < 1000; ntries++) {
6660 		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6661 		    IWN_BSM_WR_CTRL_START))
6662 			break;
6663 		DELAY(10);
6664 	}
6665 	if (ntries == 1000) {
6666 		dev_err(sc->sc_dip, CE_WARN,
6667 		    "!could not load boot firmware");
6668 		iwn_nic_unlock(sc);
6669 		return ETIMEDOUT;
6670 	}
6671 
6672 	/* Enable boot after power up. */
6673 	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6674 
6675 	iwn_nic_unlock(sc);
6676 	return 0;
6677 }
6678 
6679 static int
6680 iwn4965_load_firmware(struct iwn_softc *sc)
6681 {
6682 	struct iwn_fw_info *fw = &sc->fw;
6683 	struct iwn_dma_info *dma = &sc->fw_dma;
6684 	int error;
6685 	clock_t clk;
6686 
6687 	ASSERT(mutex_owned(&sc->sc_mtx));
6688 
6689 	/* Copy initialization sections into pre-allocated DMA-safe memory. */
6690 	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6691 	memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6692 	    fw->init.text, fw->init.textsz);
6693 	(void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6694 
6695 	/* Tell adapter where to find initialization sections. */
6696 	if ((error = iwn_nic_lock(sc)) != 0)
6697 		return error;
6698 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6699 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6700 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6701 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6702 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6703 	iwn_nic_unlock(sc);
6704 
6705 	/* Load firmware boot code. */
6706 	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6707 	if (error != 0) {
6708 		dev_err(sc->sc_dip, CE_WARN,
6709 		    "!could not load boot firmware");
6710 		return error;
6711 	}
6712 	/* Now press "execute". */
6713 	IWN_WRITE(sc, IWN_RESET, 0);
6714 
6715 	/* Wait at most one second for first alive notification. */
6716 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
6717 	while ((sc->sc_flags & IWN_FLAG_FW_ALIVE) == 0) {
6718 		if (cv_timedwait(&sc->sc_alive_cv, &sc->sc_mtx, clk) < 0) {
6719 			dev_err(sc->sc_dip, CE_WARN,
6720 			    "!timeout waiting for adapter to initialize");
6721 			return (IWN_FAIL);
6722 		}
6723 	}
6724 
6725 	/* Retrieve current temperature for initial TX power calibration. */
6726 	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6727 	sc->temp = iwn4965_get_temperature(sc);
6728 	sc->sc_misc->temp.value.ul = sc->temp;
6729 
6730 	/* Copy runtime sections into pre-allocated DMA-safe memory. */
6731 	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6732 	memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6733 	    fw->main.text, fw->main.textsz);
6734 	(void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6735 
6736 	/* Tell adapter where to find runtime sections. */
6737 	if ((error = iwn_nic_lock(sc)) != 0)
6738 		return error;
6739 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6740 	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6741 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6742 	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6743 	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6744 	    IWN_FW_UPDATED | fw->main.textsz);
6745 	iwn_nic_unlock(sc);
6746 
6747 	return 0;
6748 }
6749 
6750 static int
6751 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6752     const uint8_t *section, int size)
6753 {
6754 	struct iwn_dma_info *dma = &sc->fw_dma;
6755 	int error;
6756 	clock_t clk;
6757 
6758 	ASSERT(mutex_owned(&sc->sc_mtx));
6759 
6760 	/* Copy firmware section into pre-allocated DMA-safe memory. */
6761 	memcpy(dma->vaddr, section, size);
6762 	(void) ddi_dma_sync(dma->dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
6763 
6764 	if ((error = iwn_nic_lock(sc)) != 0)
6765 		return error;
6766 
6767 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6768 	    IWN_FH_TX_CONFIG_DMA_PAUSE);
6769 
6770 	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6771 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6772 	    IWN_LOADDR(dma->paddr));
6773 	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6774 	    IWN_HIADDR(dma->paddr) << 28 | size);
6775 	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6776 	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
6777 	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
6778 	    IWN_FH_TXBUF_STATUS_TFBD_VALID);
6779 
6780 	/* Kick Flow Handler to start DMA transfer. */
6781 	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6782 	    IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6783 
6784 	iwn_nic_unlock(sc);
6785 
6786 	/* Wait at most five seconds for FH DMA transfer to complete. */
6787 	clk = ddi_get_lbolt() + drv_usectohz(5000000);
6788 	while ((sc->sc_flags & IWN_FLAG_FW_DMA) == 0) {
6789 		if (cv_timedwait(&sc->sc_fhdma_cv, &sc->sc_mtx, clk) < 0)
6790 			return (IWN_FAIL);
6791 	}
6792 	sc->sc_flags &= ~IWN_FLAG_FW_DMA;
6793 
6794 	return (IWN_SUCCESS);
6795 }
6796 
6797 static int
6798 iwn5000_load_firmware(struct iwn_softc *sc)
6799 {
6800 	struct iwn_fw_part *fw;
6801 	int error;
6802 
6803 	/* Load the initialization firmware on first boot only. */
6804 	fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6805 	    &sc->fw.main : &sc->fw.init;
6806 
6807 	error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6808 	    fw->text, fw->textsz);
6809 	if (error != 0) {
6810 		dev_err(sc->sc_dip, CE_WARN,
6811 		    "!could not load firmware %s section", ".text");
6812 		return error;
6813 	}
6814 	error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6815 	    fw->data, fw->datasz);
6816 	if (error != 0) {
6817 		dev_err(sc->sc_dip, CE_WARN,
6818 		    "!could not load firmware %s section", ".data");
6819 		return error;
6820 	}
6821 
6822 	/* Now press "execute". */
6823 	IWN_WRITE(sc, IWN_RESET, 0);
6824 	return 0;
6825 }
6826 
6827 /*
6828  * Extract text and data sections from a legacy firmware image.
6829  */
6830 static int
6831 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6832 {
6833 	_NOTE(ARGUNUSED(sc));
6834 	const uint32_t *ptr;
6835 	size_t hdrlen = 24;
6836 	uint32_t rev;
6837 
6838 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
6839 	ptr = (const uint32_t *)fw->data;
6840 	rev = le32toh(*ptr++);
6841 
6842 	/* Check firmware API version. */
6843 	if (IWN_FW_API(rev) <= 1) {
6844 		dev_err(sc->sc_dip, CE_WARN,
6845 		    "!bad firmware, need API version >=2");
6846 		return EINVAL;
6847 	}
6848 	if (IWN_FW_API(rev) >= 3) {
6849 		/* Skip build number (version 2 header). */
6850 		hdrlen += 4;
6851 		ptr++;
6852 	}
6853 	if (fw->size < hdrlen) {
6854 		dev_err(sc->sc_dip, CE_WARN,
6855 		    "!firmware too short: %lld bytes", (longlong_t)fw->size);
6856 		return EINVAL;
6857 	}
6858 	fw->main.textsz = le32toh(*ptr++);
6859 	fw->main.datasz = le32toh(*ptr++);
6860 	fw->init.textsz = le32toh(*ptr++);
6861 	fw->init.datasz = le32toh(*ptr++);
6862 	fw->boot.textsz = le32toh(*ptr++);
6863 
6864 	/* Check that all firmware sections fit. */
6865 	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6866 	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6867 		dev_err(sc->sc_dip, CE_WARN,
6868 		    "!firmware too short: %lld bytes", (longlong_t)fw->size);
6869 		return EINVAL;
6870 	}
6871 
6872 	/* Get pointers to firmware sections. */
6873 	fw->main.text = (const uint8_t *)ptr;
6874 	fw->main.data = fw->main.text + fw->main.textsz;
6875 	fw->init.text = fw->main.data + fw->main.datasz;
6876 	fw->init.data = fw->init.text + fw->init.textsz;
6877 	fw->boot.text = fw->init.data + fw->init.datasz;
6878 	return 0;
6879 }
6880 
6881 /*
6882  * Extract text and data sections from a TLV firmware image.
6883  */
6884 static int
6885 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6886     uint16_t alt)
6887 {
6888 	_NOTE(ARGUNUSED(sc));
6889 	const struct iwn_fw_tlv_hdr *hdr;
6890 	const struct iwn_fw_tlv *tlv;
6891 	const uint8_t *ptr, *end;
6892 	uint64_t altmask;
6893 	uint32_t len;
6894 
6895 	if (fw->size < sizeof (*hdr)) {
6896 		dev_err(sc->sc_dip, CE_WARN,
6897 		    "!firmware too short: %lld bytes", (longlong_t)fw->size);
6898 		return EINVAL;
6899 	}
6900 	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6901 	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6902 		dev_err(sc->sc_dip, CE_WARN,
6903 		    "!bad firmware signature 0x%08x", le32toh(hdr->signature));
6904 		return EINVAL;
6905 	}
6906 
6907 	/*
6908 	 * Select the closest supported alternative that is less than
6909 	 * or equal to the specified one.
6910 	 */
6911 	altmask = le64toh(hdr->altmask);
6912 	while (alt > 0 && !(altmask & (1ULL << alt)))
6913 		alt--;	/* Downgrade. */
6914 	IWN_DBG("using alternative %d", alt);
6915 
6916 	ptr = (const uint8_t *)(hdr + 1);
6917 	end = (const uint8_t *)(fw->data + fw->size);
6918 
6919 	/* Parse type-length-value fields. */
6920 	while (ptr + sizeof (*tlv) <= end) {
6921 		tlv = (const struct iwn_fw_tlv *)ptr;
6922 		len = le32toh(tlv->len);
6923 
6924 		ptr += sizeof (*tlv);
6925 		if (ptr + len > end) {
6926 			dev_err(sc->sc_dip, CE_WARN,
6927 			    "!firmware too short: %lld bytes",
6928 			    (longlong_t)fw->size);
6929 			return EINVAL;
6930 		}
6931 		/* Skip other alternatives. */
6932 		if (tlv->alt != 0 && le16toh(tlv->alt) != alt) {
6933 			IWN_DBG("skipping other alternative");
6934 			goto next;
6935 		}
6936 
6937 		switch (le16toh(tlv->type)) {
6938 		case IWN_FW_TLV_MAIN_TEXT:
6939 			fw->main.text = ptr;
6940 			fw->main.textsz = len;
6941 			break;
6942 		case IWN_FW_TLV_MAIN_DATA:
6943 			fw->main.data = ptr;
6944 			fw->main.datasz = len;
6945 			break;
6946 		case IWN_FW_TLV_INIT_TEXT:
6947 			fw->init.text = ptr;
6948 			fw->init.textsz = len;
6949 			break;
6950 		case IWN_FW_TLV_INIT_DATA:
6951 			fw->init.data = ptr;
6952 			fw->init.datasz = len;
6953 			break;
6954 		case IWN_FW_TLV_BOOT_TEXT:
6955 			fw->boot.text = ptr;
6956 			fw->boot.textsz = len;
6957 			break;
6958 		case IWN_FW_TLV_ENH_SENS:
6959 			if (len != 0) {
6960 				dev_err(sc->sc_dip, CE_WARN,
6961 				    "!TLV type %d has invalid size %u",
6962 				    le16toh(tlv->type), len);
6963 				goto next;
6964 			}
6965 			sc->sc_flags |= IWN_FLAG_ENH_SENS;
6966 			break;
6967 		case IWN_FW_TLV_PHY_CALIB:
			if (len != sizeof (uint32_t)) {
				dev_err(sc->sc_dip, CE_WARN,
				    "!TLV type %d has invalid size %u",
				    le16toh(tlv->type), len);
				goto next;
			}
			/*LINTED: E_PTR_BAD_CAST_ALIGN*/
			if (le32toh(*(const uint32_t *)ptr) <=
			    IWN5000_PHY_CALIB_MAX) {
				sc->reset_noise_gain =
				    le32toh(*(const uint32_t *)ptr);
				sc->noise_gain =
				    le32toh(*(const uint32_t *)ptr) + 1;
6977 			}
6978 			break;
6979 		case IWN_FW_TLV_FLAGS:
			if (len < sizeof (uint32_t))
				break;
			if (len % sizeof (uint32_t))
				break;
			/*LINTED: E_PTR_BAD_CAST_ALIGN*/
			sc->tlv_feature_flags =
			    le32toh(*(const uint32_t *)ptr);
6985 			IWN_DBG("feature: 0x%08x", sc->tlv_feature_flags);
6986 			break;
6987 		default:
6988 			IWN_DBG("TLV type %d not handled", le16toh(tlv->type));
6989 			break;
6990 		}
6991  next:		/* TLV fields are 32-bit aligned. */
6992 		ptr += (len + 3) & ~3;
6993 	}
6994 	return 0;
6995 }
6996 
6997 static int
6998 iwn_read_firmware(struct iwn_softc *sc)
6999 {
7000 	struct iwn_fw_info *fw = &sc->fw;
7001 	firmware_handle_t fwh;
7002 	int error;
7003 
7004 	/*
7005 	 * Some PHY calibration commands are firmware-dependent; these
7006 	 * are the default values that will be overridden if
7007 	 * necessary.
7008 	 */
7009 	sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
7010 	sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
7011 
7012 	/* Initialize for error returns */
7013 	fw->data = NULL;
7014 	fw->size = 0;
7015 
7016 	/* Open firmware image. */
7017 	if ((error = firmware_open("iwn", sc->fwname, &fwh)) != 0) {
7018 		dev_err(sc->sc_dip, CE_WARN,
7019 		    "!could not get firmware handle %s", sc->fwname);
7020 		return error;
7021 	}
7022 	fw->size = firmware_get_size(fwh);
7023 	if (fw->size < sizeof (uint32_t)) {
7024 		dev_err(sc->sc_dip, CE_WARN,
7025 		    "!firmware too short: %lld bytes", (longlong_t)fw->size);
7026 		(void) firmware_close(fwh);
7027 		return EINVAL;
7028 	}
7029 
7030 	/* Read the firmware. */
7031 	fw->data = kmem_alloc(fw->size, KM_SLEEP);
7032 	error = firmware_read(fwh, 0, fw->data, fw->size);
7033 	(void) firmware_close(fwh);
7034 	if (error != 0) {
7035 		dev_err(sc->sc_dip, CE_WARN,
7036 		    "!could not read firmware %s", sc->fwname);
7037 		goto out;
7038 	}
7039 
7040 	/* Retrieve text and data sections. */
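	/*
	 * Legacy images begin with a non-zero 32-bit revision word; TLV
	 * images presumably begin with a zero word ahead of the signature,
	 * which is what the check below relies on to tell them apart.
	 */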
7041 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
7042 	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
7043 		error = iwn_read_firmware_leg(sc, fw);
7044 	else
7045 		error = iwn_read_firmware_tlv(sc, fw, 1);
7046 	if (error != 0) {
7047 		dev_err(sc->sc_dip, CE_WARN,
7048 		    "!could not read firmware sections");
7049 		goto out;
7050 	}
7051 
7052 	/* Make sure text and data sections fit in hardware memory. */
7053 	if (fw->main.textsz > sc->fw_text_maxsz ||
7054 	    fw->main.datasz > sc->fw_data_maxsz ||
7055 	    fw->init.textsz > sc->fw_text_maxsz ||
7056 	    fw->init.datasz > sc->fw_data_maxsz ||
7057 	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
7058 	    (fw->boot.textsz & 3) != 0) {
7059 		dev_err(sc->sc_dip, CE_WARN,
7060 		    "!firmware sections too large");
7061 		goto out;
7062 	}
7063 
7064 	/* We can proceed with loading the firmware. */
7065 	return 0;
7066 out:
7067 	kmem_free(fw->data, fw->size);
7068 	fw->data = NULL;
7069 	fw->size = 0;
7070 	return error ? error : EINVAL;
7071 }
7072 
7073 static int
7074 iwn_clock_wait(struct iwn_softc *sc)
7075 {
7076 	int ntries;
7077 
7078 	/* Set "initialization complete" bit. */
7079 	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
7080 
7081 	/* Wait for clock stabilization. */
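	/* Poll every 10us, i.e. wait up to roughly 25ms in total. */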
7082 	for (ntries = 0; ntries < 2500; ntries++) {
7083 		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
7084 			return 0;
7085 		DELAY(10);
7086 	}
7087 	dev_err(sc->sc_dip, CE_WARN,
7088 	    "!timeout waiting for clock stabilization");
7089 	return ETIMEDOUT;
7090 }
7091 
7092 static int
7093 iwn_apm_init(struct iwn_softc *sc)
7094 {
7095 	uint32_t reg;
7096 	int error;
7097 
7098 	/* Disable L0s exit timer (NMI bug workaround). */
7099 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
7100 	/* Don't wait for ICH L0s (ICH bug workaround). */
7101 	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
7102 
7103 	/* Set FH wait threshold to max (HW bug under stress workaround). */
7104 	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
7105 
7106 	/* Enable HAP INTA to move adapter from L1a to L0s. */
7107 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
7108 
7109 	/* Retrieve PCIe Active State Power Management (ASPM). */
7110 	reg = pci_config_get32(sc->sc_pcih,
7111 	    sc->sc_cap_off + PCIE_LINKCTL);
7112 	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
7113 	if (reg & PCIE_LINKCTL_ASPM_CTL_L1)	/* L1 Entry enabled. */
7114 		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
7115 	else
7116 		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
7117 
7118 	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
7119 	    sc->hw_type <= IWN_HW_REV_TYPE_1000)
7120 		IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
7121 
7122 	/* Wait for clock stabilization before accessing prph. */
7123 	if ((error = iwn_clock_wait(sc)) != 0)
7124 		return error;
7125 
7126 	if ((error = iwn_nic_lock(sc)) != 0)
7127 		return error;
7128 	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
7129 		/* Enable DMA and BSM (Bootstrap State Machine). */
7130 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
7131 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
7132 		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
7133 	} else {
7134 		/* Enable DMA. */
7135 		iwn_prph_write(sc, IWN_APMG_CLK_EN,
7136 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7137 	}
7138 	DELAY(20);
7139 	/* Disable L1-Active. */
7140 	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
7141 	iwn_nic_unlock(sc);
7142 
7143 	return 0;
7144 }
7145 
7146 static void
7147 iwn_apm_stop_master(struct iwn_softc *sc)
7148 {
7149 	int ntries;
7150 
7151 	/* Stop busmaster DMA activity. */
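	/* Poll up to 100 times at 10us intervals (~1ms total). */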
7152 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
7153 	for (ntries = 0; ntries < 100; ntries++) {
7154 		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
7155 			return;
7156 		DELAY(10);
7157 	}
7158 	dev_err(sc->sc_dip, CE_WARN,
7159 	    "!timeout waiting for master");
7160 }
7161 
7162 static void
7163 iwn_apm_stop(struct iwn_softc *sc)
7164 {
7165 	iwn_apm_stop_master(sc);
7166 
7167 	/* Reset the entire device. */
7168 	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
7169 	DELAY(10);
7170 	/* Clear "initialization complete" bit. */
7171 	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
7172 }
7173 
7174 static int
7175 iwn4965_nic_config(struct iwn_softc *sc)
7176 {
7177 	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
7178 		/*
7179 		 * I don't believe this to be correct but this is what the
7180 		 * vendor driver is doing. Probably the bits should not be
7181 		 * shifted in IWN_RFCFG_*.
7182 		 */
7183 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7184 		    IWN_RFCFG_TYPE(sc->rfcfg) |
7185 		    IWN_RFCFG_STEP(sc->rfcfg) |
7186 		    IWN_RFCFG_DASH(sc->rfcfg));
7187 	}
7188 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7189 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7190 	return 0;
7191 }
7192 
7193 static int
7194 iwn5000_nic_config(struct iwn_softc *sc)
7195 {
7196 	uint32_t tmp;
7197 	int error;
7198 
7199 	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
7200 		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7201 		    IWN_RFCFG_TYPE(sc->rfcfg) |
7202 		    IWN_RFCFG_STEP(sc->rfcfg) |
7203 		    IWN_RFCFG_DASH(sc->rfcfg));
7204 	}
7205 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
7206 	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
7207 
7208 	if ((error = iwn_nic_lock(sc)) != 0)
7209 		return error;
7210 	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
7211 
7212 	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
7213 		/*
7214 		 * Select first Switching Voltage Regulator (1.32V) to
7215 		 * solve a stability issue related to noisy DC2DC line
7216 		 * in the silicon of 1000 Series.
7217 		 */
7218 		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
7219 		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
7220 		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
7221 		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
7222 	}
7223 	iwn_nic_unlock(sc);
7224 
7225 	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
7226 		/* Use internal power amplifier only. */
7227 		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
7228 	}
7229 	if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
7230 		sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
7231 		/* Indicate that ROM calibration version is >=6. */
7232 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
7233 	}
7234 	if (sc->hw_type == IWN_HW_REV_TYPE_6005)
7235 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
7236 	if (sc->hw_type == IWN_HW_REV_TYPE_2030 ||
7237 	    sc->hw_type == IWN_HW_REV_TYPE_2000 ||
7238 	    sc->hw_type == IWN_HW_REV_TYPE_135  ||
7239 	    sc->hw_type == IWN_HW_REV_TYPE_105)
7240 		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_IQ_INVERT);
7241 	return 0;
7242 }
7243 
7244 /*
7245  * Take NIC ownership over Intel Active Management Technology (AMT).
7246  */
7247 static int
7248 iwn_hw_prepare(struct iwn_softc *sc)
7249 {
7250 	int ntries;
7251 
7252 	/* Check if hardware is ready. */
7253 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7254 	for (ntries = 0; ntries < 5; ntries++) {
7255 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7256 		    IWN_HW_IF_CONFIG_NIC_READY)
7257 			return 0;
7258 		DELAY(10);
7259 	}
7260 
7261 	/* Hardware not ready, force into ready state. */
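	/*
	 * Set the PREPARE bit and wait for PREPARE_DONE to clear; 15000
	 * polls at 10us intervals allow up to ~150ms for the NIC to be
	 * released (e.g. by AMT).
	 */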
7262 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
7263 	for (ntries = 0; ntries < 15000; ntries++) {
7264 		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
7265 		    IWN_HW_IF_CONFIG_PREPARE_DONE))
7266 			break;
7267 		DELAY(10);
7268 	}
7269 	if (ntries == 15000)
7270 		return ETIMEDOUT;
7271 
7272 	/* Hardware should be ready now. */
7273 	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
7274 	for (ntries = 0; ntries < 5; ntries++) {
7275 		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
7276 		    IWN_HW_IF_CONFIG_NIC_READY)
7277 			return 0;
7278 		DELAY(10);
7279 	}
7280 	return ETIMEDOUT;
7281 }
7282 
7283 static int
7284 iwn_hw_init(struct iwn_softc *sc)
7285 {
7286 	struct iwn_ops *ops = &sc->ops;
7287 	int error, chnl, qid;
7288 	clock_t clk;
7289 	uint32_t rx_config;
7290 
7291 	ASSERT(mutex_owned(&sc->sc_mtx));
7292 
7293 	/* Clear pending interrupts. */
7294 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7295 
7296 	if ((error = iwn_apm_init(sc)) != 0) {
7297 		dev_err(sc->sc_dip, CE_WARN,
7298 		    "!could not power ON adapter");
7299 		return error;
7300 	}
7301 
7302 	/* Select VMAIN power source. */
7303 	if ((error = iwn_nic_lock(sc)) != 0)
7304 		return error;
7305 	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
7306 	iwn_nic_unlock(sc);
7307 
7308 	/* Perform adapter-specific initialization. */
7309 	if ((error = ops->nic_config(sc)) != 0)
7310 		return error;
7311 
7312 	/* Initialize RX ring. */
7313 	if ((error = iwn_nic_lock(sc)) != 0)
7314 		return error;
7315 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
7316 	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
7317 	/* Set physical address of RX ring (256-byte aligned). */
7318 	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
7319 	/* Set physical address of RX status (16-byte aligned). */
7320 	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
7321 	/* Enable RX. */
7322 	rx_config =
7323 	    IWN_FH_RX_CONFIG_ENA	   |
7324 #if IWN_RBUF_SIZE == 8192
7325 	    IWN_FH_RX_CONFIG_RB_SIZE_8K    |
7326 #endif
7327 	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
7328 	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
7329 	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
7330 	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
7331 	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG);
7332 	IWN_WRITE(sc, IWN_FH_RX_CONFIG, rx_config);
7333 	iwn_nic_unlock(sc);
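	/* The RX write pointer is kept a multiple of 8, hence the '& ~7'. */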
7334 	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
7335 
7336 	if ((error = iwn_nic_lock(sc)) != 0)
7337 		return error;
7338 
7339 	/* Initialize TX scheduler. */
7340 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7341 
7342 	/* Set physical address of "keep warm" page (16-byte aligned). */
7343 	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
7344 
7345 	/* Initialize TX rings. */
7346 	for (qid = 0; qid < sc->ntxqs; qid++) {
7347 		struct iwn_tx_ring *txq = &sc->txq[qid];
7348 
7349 		/* Set physical address of TX ring (256-byte aligned). */
7350 		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
7351 		    txq->desc_dma.paddr >> 8);
7352 	}
7353 	iwn_nic_unlock(sc);
7354 
7355 	/* Enable DMA channels. */
7356 	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7357 		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
7358 		    IWN_FH_TX_CONFIG_DMA_ENA |
7359 		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
7360 	}
7361 
7362 	/* Clear "radio off" and "commands blocked" bits. */
7363 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7364 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
7365 
7366 	/* Clear pending interrupts. */
7367 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7368 	/* Enable interrupt coalescing. */
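	/* The timeout below is presumably in 32us units, i.e. 512us here. */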
7369 	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 32);
7370 	/* Enable interrupts. */
7371 	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
7372 
7373 	/* _Really_ make sure "radio off" bit is cleared! */
7374 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7375 	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
7376 
7377 	/* Enable shadow registers. */
7378 	if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
7379 		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
7380 
7381 	if ((error = ops->load_firmware(sc)) != 0) {
7382 		dev_err(sc->sc_dip, CE_WARN,
7383 		    "!could not load firmware");
7384 		return error;
7385 	}
7386 	/* Wait at most one second for firmware alive notification. */
7387 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
7388 	while ((sc->sc_flags & IWN_FLAG_FW_ALIVE) == 0) {
7389 		if (cv_timedwait(&sc->sc_alive_cv, &sc->sc_mtx, clk) < 0) {
7390 			dev_err(sc->sc_dip, CE_WARN,
7391 			    "!timeout waiting for adapter to initialize");
7392 			return (IWN_FAIL);
7393 		}
7394 	}
7395 	/* Do post-firmware initialization. */
7396 	return ops->post_alive(sc);
7397 }
7398 
7399 static void
7400 iwn_hw_stop(struct iwn_softc *sc, boolean_t lock)
7401 {
7402 	int chnl, qid, ntries;
7403 
7404 	if (lock) {
7405 		mutex_enter(&sc->sc_mtx);
7406 	}
7407 
7408 	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
7409 
7410 	/* Disable interrupts. */
7411 	IWN_WRITE(sc, IWN_INT_MASK, 0);
7412 	IWN_WRITE(sc, IWN_INT, 0xffffffff);
7413 	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
7414 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7415 
7416 	/* Make sure we no longer hold the NIC lock. */
7417 	iwn_nic_unlock(sc);
7418 
7419 	/* Stop TX scheduler. */
7420 	iwn_prph_write(sc, sc->sched_txfact_addr, 0);
7421 
7422 	/* Stop all DMA channels. */
7423 	if (iwn_nic_lock(sc) == 0) {
7424 		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
7425 			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
7426 			for (ntries = 0; ntries < 200; ntries++) {
7427 				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
7428 				    IWN_FH_TX_STATUS_IDLE(chnl))
7429 					break;
7430 				DELAY(10);
7431 			}
7432 		}
7433 		iwn_nic_unlock(sc);
7434 	}
7435 
7436 	/* Stop RX ring. */
7437 	iwn_reset_rx_ring(sc, &sc->rxq);
7438 
7439 	/* Reset all TX rings. */
7440 	for (qid = 0; qid < sc->ntxqs; qid++)
7441 		iwn_reset_tx_ring(sc, &sc->txq[qid]);
7442 
7443 	if (iwn_nic_lock(sc) == 0) {
7444 		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
7445 		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
7446 		iwn_nic_unlock(sc);
7447 	}
7448 	DELAY(5);
7449 	/* Power OFF adapter. */
7450 	iwn_apm_stop(sc);
7451 
7452 	sc->sc_flags &= ~(IWN_FLAG_HW_INITED | IWN_FLAG_FW_ALIVE);
7453 
7454 	if (lock) {
7455 		mutex_exit(&sc->sc_mtx);
7456 	}
7457 }
7458 
7459 static int
7460 iwn_init(struct iwn_softc *sc)
7461 {
7462 	int error;
7463 
7464 	mutex_enter(&sc->sc_mtx);
7465 	if (sc->sc_flags & IWN_FLAG_HW_INITED)
7466 		goto out;
7467 	if ((error = iwn_hw_prepare(sc)) != 0) {
7468 		dev_err(sc->sc_dip, CE_WARN, "!hardware not ready");
7469 		goto fail;
7470 	}
7471 
7472 	/* Check that the radio is not disabled by hardware switch. */
7473 	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7474 		dev_err(sc->sc_dip, CE_WARN,
7475 		    "!radio is disabled by hardware switch");
7476 		error = EPERM;	/* :-) */
7477 		goto fail;
7478 	}
7479 
7480 	/* Read firmware images from the filesystem. */
7481 	if ((error = iwn_read_firmware(sc)) != 0) {
7482 		dev_err(sc->sc_dip, CE_WARN, "!could not read firmware");
7483 		goto fail;
7484 	}
7485 
7486 	/* Initialize interrupt mask to default value. */
7487 	sc->int_mask = IWN_INT_MASK_DEF;
7488 	sc->sc_flags &= ~IWN_FLAG_USE_ICT;
7489 
7490 	/* Initialize hardware and upload firmware. */
7491 	ASSERT(sc->fw.data != NULL && sc->fw.size > 0);
7492 	error = iwn_hw_init(sc);
7493 	if (error != 0) {
7494 		dev_err(sc->sc_dip, CE_WARN, "!could not initialize hardware");
7495 		goto fail;
7496 	}
7497 
7498 	/* Configure adapter now that it is ready. */
7499 	if ((error = iwn_config(sc)) != 0) {
7500 		dev_err(sc->sc_dip, CE_WARN, "!could not configure device");
7501 		goto fail;
7502 	}
7503 
7504 	sc->sc_flags |= IWN_FLAG_HW_INITED;
7505 out:
7506 	mutex_exit(&sc->sc_mtx);
7507 	return 0;
7508 
7509 fail:
7510 	iwn_hw_stop(sc, B_FALSE);
7511 	mutex_exit(&sc->sc_mtx);
7512 	return error;
7513 }
7514 
7515 /*
 * XXX code from usr/src/uts/common/io/net80211/net80211_output.c
7517  * Copyright (c) 2001 Atsushi Onoe
7518  * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
7519  * Copyright (c) 2007-2009 Damien Bergamini
7520  * All rights reserved.
7521  */
7522 
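/*
 * Information elements are encoded as a one-byte element ID, a one-byte
 * length and the payload; these helpers return a pointer just past the
 * bytes they appended.
 */
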
7523 /*
 * Add an SSID element to a frame.
7525  */
7526 static uint8_t *
7527 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, uint32_t len)
7528 {
7529 	*frm++ = IEEE80211_ELEMID_SSID;
7530 	*frm++ = (uint8_t)len;
7531 	bcopy(ssid, frm, len);
7532 	return (frm + len);
7533 }
7534 
7535 /*
7536  * Add supported rates information element to a frame.
7537  */
7538 static uint8_t *
7539 ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs)
7540 {
7541 	uint8_t nrates;
7542 
7543 	*frm++ = IEEE80211_ELEMID_RATES;
7544 	nrates = rs->ir_nrates;
7545 	if (nrates > IEEE80211_RATE_SIZE)
7546 		nrates = IEEE80211_RATE_SIZE;
7547 	*frm++ = nrates;
7548 	bcopy(rs->ir_rates, frm, nrates);
7549 	return (frm + nrates);
7550 }
7551 
7552 /*
 * Add an extended supported rates element to a frame, usually for 11g mode.
7554  */
7555 static uint8_t *
7556 ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs)
7557 {
7558 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
7559 		uint8_t nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
7560 
7561 		*frm++ = IEEE80211_ELEMID_XRATES;
7562 		*frm++ = nrates;
7563 		bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
7564 		frm += nrates;
7565 	}
7566 	return (frm);
7567 }
7568 
7569 /*
7570  * XXX: Hack to set the current channel to the value advertised in beacons or
7571  * probe responses. Only used during AP detection.
7572  * XXX: Duplicated from if_iwi.c
7573  */
7574 static void
7575 iwn_fix_channel(struct iwn_softc *sc, mblk_t *m,
7576     struct iwn_rx_stat *stat)
7577 {
7578 	struct ieee80211com *ic = &sc->sc_ic;
7579 	struct ieee80211_frame *wh;
7580 	uint8_t subtype;
7581 	uint8_t *frm, *efrm;
7582 
7583 	wh = (struct ieee80211_frame *)m->b_rptr;
7584 
7585 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
7586 		return;
7587 
7588 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
7589 
7590 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
7591 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
7592 		return;
7593 
7594 	if (sc->sc_flags & IWN_FLAG_SCANNING_5GHZ) {
7595 		int chan = le16toh(stat->chan);
7596 		if (chan < __arraycount(ic->ic_sup_channels))
7597 			ic->ic_curchan = &ic->ic_sup_channels[chan];
7598 		return;
7599 	}
7600 
7601 	frm = (uint8_t *)(wh + 1);
7602 	efrm = (uint8_t *)m->b_wptr;
7603 
7604 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
7605 	while (frm < efrm) {
7606 		if (*frm == IEEE80211_ELEMID_DSPARMS)
7607 #if IEEE80211_CHAN_MAX < 255
7608 		if (frm[2] <= IEEE80211_CHAN_MAX)
7609 #endif
7610 			ic->ic_curchan = &ic->ic_sup_channels[frm[2]];
7611 
7612 		frm += frm[1] + 2;
7613 	}
7614 }
7615 
7616 /*
 * invoked by GLD to start or open the NIC
7618  */
7619 static int
7620 iwn_m_start(void *arg)
7621 {
7622 	struct iwn_softc *sc;
7623 	ieee80211com_t	*ic;
7624 	int err = IWN_FAIL;
7625 
7626 	sc = (struct iwn_softc *)arg;
7627 	ASSERT(sc != NULL);
7628 	ic = &sc->sc_ic;
7629 
7630 	err = iwn_init(sc);
7631 	if (err != IWN_SUCCESS) {
7632 		/*
7633 		 * If initialization failed because the RF switch is off,
7634 		 * return success anyway to make the 'plumb' succeed.
		 * The iwn_thread() will try to re-initialize it in the
		 * background.
7636 		 */
7637 		if (err == EPERM &&
7638 		    !(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
7639 			mutex_enter(&sc->sc_mtx);
7640 			sc->sc_flags |= IWN_FLAG_HW_ERR_RECOVER;
7641 			sc->sc_flags |= IWN_FLAG_RADIO_OFF;
7642 			mutex_exit(&sc->sc_mtx);
7643 			return (IWN_SUCCESS);
7644 		}
7645 
7646 		return (err);
7647 	}
7648 
7649 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7650 
7651 	mutex_enter(&sc->sc_mtx);
7652 	sc->sc_flags |= IWN_FLAG_RUNNING;
7653 	mutex_exit(&sc->sc_mtx);
7654 
7655 	return (IWN_SUCCESS);
7656 }
7657 
7658 /*
 * invoked by GLD to stop or bring down the NIC
7660  */
7661 static void
7662 iwn_m_stop(void *arg)
7663 {
7664 	struct iwn_softc *sc;
7665 	ieee80211com_t	*ic;
7666 
7667 	sc = (struct iwn_softc *)arg;
7668 	ASSERT(sc != NULL);
7669 	ic = &sc->sc_ic;
7670 
7671 	iwn_hw_stop(sc, B_TRUE);
7672 
7673 	/*
7674 	 * release buffer for calibration
7675 	 */
7676 
7677 	ieee80211_stop_watchdog(ic);
7678 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
7679 
7680 	mutex_enter(&sc->sc_mtx);
7681 	sc->sc_flags &= ~IWN_FLAG_HW_ERR_RECOVER;
7682 	sc->sc_flags &= ~IWN_FLAG_RATE_AUTO_CTL;
7683 
7684 	sc->sc_flags &= ~IWN_FLAG_RUNNING;
7685 	sc->sc_flags &= ~IWN_FLAG_SCANNING;
7686 	mutex_exit(&sc->sc_mtx);
7687 }
7688 
7689 
7690 /*
7691  * Module Loading Data & Entry Points
7692  */
7693 DDI_DEFINE_STREAM_OPS(iwn_devops, nulldev, nulldev, iwn_attach,
7694     iwn_detach, nodev, NULL, D_MP, NULL, iwn_quiesce);
7695 
7696 static struct modldrv iwn_modldrv = {
7697 	&mod_driverops,
	"Intel WiFi Link 4965 and 100/1000/2000/5000/6000 series driver",
7699 	&iwn_devops
7700 };
7701 
7702 static struct modlinkage iwn_modlinkage = {
7703 	MODREV_1,
7704 	&iwn_modldrv,
7705 	NULL
7706 };
7707 
7708 int
7709 _init(void)
7710 {
7711 	int	status;
7712 
7713 	status = ddi_soft_state_init(&iwn_state,
7714 	    sizeof (struct iwn_softc), 1);
7715 	if (status != DDI_SUCCESS)
7716 		return (status);
7717 
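	/*
	 * Register the MAC (GLDv3) entry points before installing the
	 * module; they are torn down again with mac_fini_ops() on failure
	 * and in _fini().
	 */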
7718 	mac_init_ops(&iwn_devops, "iwn");
7719 	status = mod_install(&iwn_modlinkage);
7720 	if (status != DDI_SUCCESS) {
7721 		mac_fini_ops(&iwn_devops);
7722 		ddi_soft_state_fini(&iwn_state);
7723 	}
7724 
7725 	return (status);
7726 }
7727 
7728 int
7729 _fini(void)
7730 {
7731 	int status;
7732 
7733 	status = mod_remove(&iwn_modlinkage);
7734 	if (status == DDI_SUCCESS) {
7735 		mac_fini_ops(&iwn_devops);
7736 		ddi_soft_state_fini(&iwn_state);
7737 	}
7738 
7739 	return (status);
7740 }
7741 
7742 int
7743 _info(struct modinfo *mip)
7744 {
7745 	return (mod_info(&iwn_modlinkage, mip));
7746 }
7747