if_iwm - Remove deprecated scan API definitions.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
154
155 #include <machine/endian.h>
156
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
159
160 #include <net/bpf.h>
161
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
168
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
173
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
178
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192
/*
 * Channel number table used when parsing NVM data on 7000-family
 * devices: all fourteen 2.4 GHz channels followed by the supported
 * 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
/* Build-time guard: the table must fit the driver's channel storage. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
203
/*
 * Channel number table for 8000-family devices; a superset of the
 * 7000-family list with additional 5 GHz channels.
 */
const uint8_t iwm_nvm_channels_8000[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
        96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* Build-time guard: the table must fit the driver's channel storage. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
214
/* Count of 2.4 GHz entries at the start of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS   14
/* Mask applied to the NVM field holding the number of HW addresses. */
#define IWM_N_HW_ADDR_MASK      0xF
217
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
        uint8_t rate;   /* bitrate in 500 kb/s units (2 == 1 Mb/s) */
        uint8_t plcp;   /* corresponding PLCP value for the firmware */
} iwm_rates[] = {
        /* CCK rates (indices 0..3) */
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        /* OFDM rates (indices 4..11) */
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
/* Indices of the first CCK and first OFDM entries in iwm_rates[]. */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
244
/*
 * One NVM section read from the device, as collected by
 * iwm_nvm_read_section() and consumed by iwm_parse_nvm_sections().
 */
struct iwm_nvm_section {
        uint16_t length;        /* number of valid bytes in data */
        uint8_t *data;          /* section contents */
};
249
250 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
251 static int      iwm_firmware_store_section(struct iwm_softc *,
252                                            enum iwm_ucode_type,
253                                            const uint8_t *, size_t);
254 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
255 static void     iwm_fw_info_free(struct iwm_fw_info *);
256 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
257 #if !defined(__DragonFly__)
258 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
259 #endif
260 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
261                                      bus_size_t, bus_size_t);
262 static void     iwm_dma_contig_free(struct iwm_dma_info *);
263 static int      iwm_alloc_fwmem(struct iwm_softc *);
264 static int      iwm_alloc_sched(struct iwm_softc *);
265 static int      iwm_alloc_kw(struct iwm_softc *);
266 static int      iwm_alloc_ict(struct iwm_softc *);
267 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
268 static void     iwm_disable_rx_dma(struct iwm_softc *);
269 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
270 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
271 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
272                                   int);
273 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
274 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
275 static void     iwm_enable_interrupts(struct iwm_softc *);
276 static void     iwm_restore_interrupts(struct iwm_softc *);
277 static void     iwm_disable_interrupts(struct iwm_softc *);
278 static void     iwm_ict_reset(struct iwm_softc *);
279 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
280 static void     iwm_stop_device(struct iwm_softc *);
281 static void     iwm_mvm_nic_config(struct iwm_softc *);
282 static int      iwm_nic_rx_init(struct iwm_softc *);
283 static int      iwm_nic_tx_init(struct iwm_softc *);
284 static int      iwm_nic_init(struct iwm_softc *);
285 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
286 static int      iwm_post_alive(struct iwm_softc *);
287 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
288                                    uint16_t, uint8_t *, uint16_t *);
289 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
290                                      uint16_t *, size_t);
291 static uint32_t iwm_eeprom_channel_flags(uint16_t);
292 static void     iwm_add_channel_band(struct iwm_softc *,
293                     struct ieee80211_channel[], int, int *, int, size_t,
294                     const uint8_t[]);
295 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
296                     struct ieee80211_channel[]);
297 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
298                                    const uint16_t *, const uint16_t *,
299                                    const uint16_t *, const uint16_t *,
300                                    const uint16_t *);
301 static void     iwm_set_hw_address_8000(struct iwm_softc *,
302                                         struct iwm_nvm_data *,
303                                         const uint16_t *, const uint16_t *);
304 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
305                             const uint16_t *);
306 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
307 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
308                                   const uint16_t *);
309 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
310                                    const uint16_t *);
311 static void     iwm_set_radio_cfg(const struct iwm_softc *,
312                                   struct iwm_nvm_data *, uint32_t);
313 static int      iwm_parse_nvm_sections(struct iwm_softc *,
314                                        struct iwm_nvm_section *);
315 static int      iwm_nvm_init(struct iwm_softc *);
316 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
317                                        const uint8_t *, uint32_t);
318 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
319                                         const uint8_t *, uint32_t);
320 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
321 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
322                                            struct iwm_fw_sects *, int , int *);
323 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
324 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
325 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
326 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
327 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
328 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
329                                               enum iwm_ucode_type);
330 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
331 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
332 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
333 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
334                                             struct iwm_rx_phy_info *);
335 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
336                                       struct iwm_rx_packet *,
337                                       struct iwm_rx_data *);
338 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
339 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
340                                    struct iwm_rx_data *);
341 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
342                                          struct iwm_rx_packet *,
343                                          struct iwm_node *);
344 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
345                                   struct iwm_rx_data *);
346 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
347 #if 0
348 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
349                                  uint16_t);
350 #endif
351 static const struct iwm_rate *
352         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
353                         struct ieee80211_frame *, struct iwm_tx_cmd *);
354 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
355                        struct ieee80211_node *, int);
356 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
357                              const struct ieee80211_bpf_params *);
358 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
359                                                 struct iwm_mvm_add_sta_cmd_v7 *,
360                                                 int *);
361 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
362                                        int);
363 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
364 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
365 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
366                                            struct iwm_int_sta *,
367                                            const uint8_t *, uint16_t, uint16_t);
368 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
369 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
370 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
371 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
372 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
373 static struct ieee80211_node *
374                 iwm_node_alloc(struct ieee80211vap *,
375                                const uint8_t[IEEE80211_ADDR_LEN]);
376 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
377 static int      iwm_media_change(struct ifnet *);
378 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
379 static void     iwm_endscan_cb(void *, int);
380 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
381                                         struct iwm_sf_cfg_cmd *,
382                                         struct ieee80211_node *);
383 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
384 static int      iwm_send_bt_init_conf(struct iwm_softc *);
385 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
386 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
387 static int      iwm_init_hw(struct iwm_softc *);
388 static void     iwm_init(struct iwm_softc *);
389 static void     iwm_start(struct iwm_softc *);
390 static void     iwm_stop(struct iwm_softc *);
391 static void     iwm_watchdog(void *);
392 static void     iwm_parent(struct ieee80211com *);
393 #ifdef IWM_DEBUG
394 static const char *
395                 iwm_desc_lookup(uint32_t);
396 static void     iwm_nic_error(struct iwm_softc *);
397 static void     iwm_nic_umac_error(struct iwm_softc *);
398 #endif
399 static void     iwm_notif_intr(struct iwm_softc *);
400 static void     iwm_intr(void *);
401 static int      iwm_attach(device_t);
402 static int      iwm_is_valid_ether_addr(uint8_t *);
403 static void     iwm_preinit(void *);
404 static int      iwm_detach_local(struct iwm_softc *sc, int);
405 static void     iwm_init_task(void *);
406 static void     iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408                 iwm_vap_create(struct ieee80211com *,
409                                const char [IFNAMSIZ], int,
410                                enum ieee80211_opmode, int,
411                                const uint8_t [IEEE80211_ADDR_LEN],
412                                const uint8_t [IEEE80211_ADDR_LEN]);
413 static void     iwm_vap_delete(struct ieee80211vap *);
414 static void     iwm_scan_start(struct ieee80211com *);
415 static void     iwm_scan_end(struct ieee80211com *);
416 static void     iwm_update_mcast(struct ieee80211com *);
417 static void     iwm_set_channel(struct ieee80211com *);
418 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
419 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
420 static int      iwm_detach(device_t);
421
#if defined(__DragonFly__)
/*
 * MSI is enabled by default; the hw.iwm.msi.enable tunable allows it to
 * be turned off (presumably falling back to legacy interrupts -- see the
 * MSI setup in attach for the actual fallback behavior).
 */
static int      iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif
428
429 /*
430  * Firmware parser.
431  */
432
433 static int
434 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
435 {
436         const struct iwm_fw_cscheme_list *l = (const void *)data;
437
438         if (dlen < sizeof(*l) ||
439             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
440                 return EINVAL;
441
442         /* we don't actually store anything for now, always use s/w crypto */
443
444         return 0;
445 }
446
447 static int
448 iwm_firmware_store_section(struct iwm_softc *sc,
449     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
450 {
451         struct iwm_fw_sects *fws;
452         struct iwm_fw_onesect *fwone;
453
454         if (type >= IWM_UCODE_TYPE_MAX)
455                 return EINVAL;
456         if (dlen < sizeof(uint32_t))
457                 return EINVAL;
458
459         fws = &sc->sc_fw.fw_sects[type];
460         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
461                 return EINVAL;
462
463         fwone = &fws->fw_sect[fws->fw_count];
464
465         /* first 32bit are device load offset */
466         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
467
468         /* rest is data */
469         fwone->fws_data = data + sizeof(uint32_t);
470         fwone->fws_len = dlen - sizeof(uint32_t);
471
472         fws->fw_count++;
473         fws->fw_totlen += fwone->fws_len;
474
475         return 0;
476 }
477
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB payload: the ucode image
 * type it applies to (little-endian on the wire) plus its default
 * calibration trigger masks.
 */
struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;
482
483 static int
484 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
485 {
486         const struct iwm_tlv_calib_data *def_calib = data;
487         uint32_t ucode_type = le32toh(def_calib->ucode_type);
488
489         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
490                 device_printf(sc->sc_dev,
491                     "Wrong ucode_type %u for default "
492                     "calibration.\n", ucode_type);
493                 return EINVAL;
494         }
495
496         sc->sc_default_calib[ucode_type].flow_trigger =
497             def_calib->calib.flow_trigger;
498         sc->sc_default_calib[ucode_type].event_trigger =
499             def_calib->calib.event_trigger;
500
501         return 0;
502 }
503
/*
 * Release a firmware image previously obtained with firmware_get() and
 * clear the parsed section table so it can be repopulated on the next
 * load.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
        firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
        fw->fw_fp = NULL;
        /*
         * don't touch fw->fw_status: iwm_read_firmware() uses it as the
         * load-state flag that sleepers wait on.
         */
        memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
512
513 static int
514 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
515 {
516         struct iwm_fw_info *fw = &sc->sc_fw;
517         const struct iwm_tlv_ucode_header *uhdr;
518         struct iwm_ucode_tlv tlv;
519         enum iwm_ucode_tlv_type tlv_type;
520         const struct firmware *fwp;
521         const uint8_t *data;
522         int error = 0;
523         size_t len;
524
525         if (fw->fw_status == IWM_FW_STATUS_DONE &&
526             ucode_type != IWM_UCODE_TYPE_INIT)
527                 return 0;
528
529         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
530 #if defined(__DragonFly__)
531                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
532 #else
533                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
534 #endif
535         }
536         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
537
538         if (fw->fw_fp != NULL)
539                 iwm_fw_info_free(fw);
540
541         /*
542          * Load firmware into driver memory.
543          * fw_fp will be set.
544          */
545         IWM_UNLOCK(sc);
546         fwp = firmware_get(sc->sc_fwname);
547         IWM_LOCK(sc);
548         if (fwp == NULL) {
549                 device_printf(sc->sc_dev,
550                     "could not read firmware %s (error %d)\n",
551                     sc->sc_fwname, error);
552                 goto out;
553         }
554         fw->fw_fp = fwp;
555
556         /* (Re-)Initialize default values. */
557         sc->sc_capaflags = 0;
558         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
559         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
560         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
561
562         /*
563          * Parse firmware contents
564          */
565
566         uhdr = (const void *)fw->fw_fp->data;
567         if (*(const uint32_t *)fw->fw_fp->data != 0
568             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
569                 device_printf(sc->sc_dev, "invalid firmware %s\n",
570                     sc->sc_fwname);
571                 error = EINVAL;
572                 goto out;
573         }
574
575         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
576             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
577             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
578             IWM_UCODE_API(le32toh(uhdr->ver)));
579         data = uhdr->data;
580         len = fw->fw_fp->datasize - sizeof(*uhdr);
581
582         while (len >= sizeof(tlv)) {
583                 size_t tlv_len;
584                 const void *tlv_data;
585
586                 memcpy(&tlv, data, sizeof(tlv));
587                 tlv_len = le32toh(tlv.length);
588                 tlv_type = le32toh(tlv.type);
589
590                 len -= sizeof(tlv);
591                 data += sizeof(tlv);
592                 tlv_data = data;
593
594                 if (len < tlv_len) {
595                         device_printf(sc->sc_dev,
596                             "firmware too short: %zu bytes\n",
597                             len);
598                         error = EINVAL;
599                         goto parse_out;
600                 }
601
602                 switch ((int)tlv_type) {
603                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
604                         if (tlv_len < sizeof(uint32_t)) {
605                                 device_printf(sc->sc_dev,
606                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
607                                     __func__,
608                                     (int) tlv_len);
609                                 error = EINVAL;
610                                 goto parse_out;
611                         }
612                         sc->sc_capa_max_probe_len
613                             = le32toh(*(const uint32_t *)tlv_data);
614                         /* limit it to something sensible */
615                         if (sc->sc_capa_max_probe_len >
616                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
617                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
618                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
619                                     "ridiculous\n", __func__);
620                                 error = EINVAL;
621                                 goto parse_out;
622                         }
623                         break;
624                 case IWM_UCODE_TLV_PAN:
625                         if (tlv_len) {
626                                 device_printf(sc->sc_dev,
627                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
628                                     __func__,
629                                     (int) tlv_len);
630                                 error = EINVAL;
631                                 goto parse_out;
632                         }
633                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
634                         break;
635                 case IWM_UCODE_TLV_FLAGS:
636                         if (tlv_len < sizeof(uint32_t)) {
637                                 device_printf(sc->sc_dev,
638                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
639                                     __func__,
640                                     (int) tlv_len);
641                                 error = EINVAL;
642                                 goto parse_out;
643                         }
644                         /*
645                          * Apparently there can be many flags, but Linux driver
646                          * parses only the first one, and so do we.
647                          *
648                          * XXX: why does this override IWM_UCODE_TLV_PAN?
649                          * Intentional or a bug?  Observations from
650                          * current firmware file:
651                          *  1) TLV_PAN is parsed first
652                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
653                          * ==> this resets TLV_PAN to itself... hnnnk
654                          */
655                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
656                         break;
657                 case IWM_UCODE_TLV_CSCHEME:
658                         if ((error = iwm_store_cscheme(sc,
659                             tlv_data, tlv_len)) != 0) {
660                                 device_printf(sc->sc_dev,
661                                     "%s: iwm_store_cscheme(): returned %d\n",
662                                     __func__,
663                                     error);
664                                 goto parse_out;
665                         }
666                         break;
667                 case IWM_UCODE_TLV_NUM_OF_CPU: {
668                         uint32_t num_cpu;
669                         if (tlv_len != sizeof(uint32_t)) {
670                                 device_printf(sc->sc_dev,
671                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
672                                     __func__,
673                                     (int) tlv_len);
674                                 error = EINVAL;
675                                 goto parse_out;
676                         }
677                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
678                         if (num_cpu < 1 || num_cpu > 2) {
679                                 device_printf(sc->sc_dev,
680                                     "%s: Driver supports only 1 or 2 CPUs\n",
681                                     __func__);
682                                 error = EINVAL;
683                                 goto parse_out;
684                         }
685                         break;
686                 }
687                 case IWM_UCODE_TLV_SEC_RT:
688                         if ((error = iwm_firmware_store_section(sc,
689                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
690                                 device_printf(sc->sc_dev,
691                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
692                                     __func__,
693                                     error);
694                                 goto parse_out;
695                         }
696                         break;
697                 case IWM_UCODE_TLV_SEC_INIT:
698                         if ((error = iwm_firmware_store_section(sc,
699                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
700                                 device_printf(sc->sc_dev,
701                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
702                                     __func__,
703                                     error);
704                                 goto parse_out;
705                         }
706                         break;
707                 case IWM_UCODE_TLV_SEC_WOWLAN:
708                         if ((error = iwm_firmware_store_section(sc,
709                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
710                                 device_printf(sc->sc_dev,
711                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
712                                     __func__,
713                                     error);
714                                 goto parse_out;
715                         }
716                         break;
717                 case IWM_UCODE_TLV_DEF_CALIB:
718                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
719                                 device_printf(sc->sc_dev,
720                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
721                                     __func__,
722                                     (int) tlv_len,
723                                     (int) sizeof(struct iwm_tlv_calib_data));
724                                 error = EINVAL;
725                                 goto parse_out;
726                         }
727                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
728                                 device_printf(sc->sc_dev,
729                                     "%s: iwm_set_default_calib() failed: %d\n",
730                                     __func__,
731                                     error);
732                                 goto parse_out;
733                         }
734                         break;
735                 case IWM_UCODE_TLV_PHY_SKU:
736                         if (tlv_len != sizeof(uint32_t)) {
737                                 error = EINVAL;
738                                 device_printf(sc->sc_dev,
739                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
740                                     __func__,
741                                     (int) tlv_len);
742                                 goto parse_out;
743                         }
744                         sc->sc_fw_phy_config =
745                             le32toh(*(const uint32_t *)tlv_data);
746                         break;
747
748                 case IWM_UCODE_TLV_API_CHANGES_SET: {
749                         const struct iwm_ucode_api *api;
750                         if (tlv_len != sizeof(*api)) {
751                                 error = EINVAL;
752                                 goto parse_out;
753                         }
754                         api = (const struct iwm_ucode_api *)tlv_data;
755                         /* Flags may exceed 32 bits in future firmware. */
756                         if (le32toh(api->api_index) > 0) {
757                                 device_printf(sc->sc_dev,
758                                     "unsupported API index %d\n",
759                                     le32toh(api->api_index));
760                                 goto parse_out;
761                         }
762                         sc->sc_ucode_api = le32toh(api->api_flags);
763                         break;
764                 }
765
766                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
767                         const struct iwm_ucode_capa *capa;
768                         int idx, i;
769                         if (tlv_len != sizeof(*capa)) {
770                                 error = EINVAL;
771                                 goto parse_out;
772                         }
773                         capa = (const struct iwm_ucode_capa *)tlv_data;
774                         idx = le32toh(capa->api_index);
775                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
776                                 device_printf(sc->sc_dev,
777                                     "unsupported API index %d\n", idx);
778                                 goto parse_out;
779                         }
780                         for (i = 0; i < 32; i++) {
781                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
782                                         continue;
783                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
784                         }
785                         break;
786                 }
787
788                 case 48: /* undocumented TLV */
789                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
790                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
791                         /* ignore, not used by current driver */
792                         break;
793
794                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
795                         if ((error = iwm_firmware_store_section(sc,
796                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
797                             tlv_len)) != 0)
798                                 goto parse_out;
799                         break;
800
801                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
802                         if (tlv_len != sizeof(uint32_t)) {
803                                 error = EINVAL;
804                                 goto parse_out;
805                         }
806                         sc->sc_capa_n_scan_channels =
807                           le32toh(*(const uint32_t *)tlv_data);
808                         break;
809
810                 case IWM_UCODE_TLV_FW_VERSION:
811                         if (tlv_len != sizeof(uint32_t) * 3) {
812                                 error = EINVAL;
813                                 goto parse_out;
814                         }
815                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
816                             "%d.%d.%d",
817                             le32toh(((const uint32_t *)tlv_data)[0]),
818                             le32toh(((const uint32_t *)tlv_data)[1]),
819                             le32toh(((const uint32_t *)tlv_data)[2]));
820                         break;
821
822                 default:
823                         device_printf(sc->sc_dev,
824                             "%s: unknown firmware section %d, abort\n",
825                             __func__, tlv_type);
826                         error = EINVAL;
827                         goto parse_out;
828                 }
829
830                 len -= roundup(tlv_len, 4);
831                 data += roundup(tlv_len, 4);
832         }
833
834         KASSERT(error == 0, ("unhandled error"));
835
836  parse_out:
837         if (error) {
838                 device_printf(sc->sc_dev, "firmware parse error %d, "
839                     "section type %d\n", error, tlv_type);
840         }
841
842         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
843                 device_printf(sc->sc_dev,
844                     "device uses unsupported power ops\n");
845                 error = ENOTSUP;
846         }
847
848  out:
849         if (error) {
850                 fw->fw_status = IWM_FW_STATUS_NONE;
851                 if (fw->fw_fp != NULL)
852                         iwm_fw_info_free(fw);
853         } else
854                 fw->fw_status = IWM_FW_STATUS_DONE;
855         wakeup(&sc->sc_fw);
856
857         return error;
858 }
859
860 /*
861  * DMA resource routines
862  */
863
864 #if !defined(__DragonFly__)
865 static void
866 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
867 {
868         if (error != 0)
869                 return;
870         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
871         *(bus_addr_t *)arg = segs[0].ds_addr;
872 }
873 #endif
874
/*
 * Allocate a physically contiguous, coherent DMA buffer of 'size' bytes
 * with the requested alignment, restricted to the low 32-bit address
 * space.  On success dma->{tag,map,vaddr,paddr,size} describe the
 * buffer; on failure everything acquired so far is released via
 * iwm_dma_contig_free().
 *
 * Returns 0 on success or a bus_dma error code.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Start from a clean state so iwm_dma_contig_free() is safe on error. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	/* DragonFly has a one-shot helper for coherent allocations. */
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	/* Single segment: the device expects contiguous memory. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwm_dma_map_addr() stores the bus address into dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free here; clearing vaddr keeps _free() from freeing twice. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
929
/*
 * Release a buffer obtained from iwm_dma_contig_alloc().  Safe to call
 * on a partially initialized iwm_dma_info (NULL fields are skipped),
 * which is what the allocation error path relies on.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		/* Complete outstanding DMA before unmapping and freeing. */
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		/* Destroy the tag last, once nothing references it. */
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
945
946 /* fwmem is used to load firmware onto the card */
947 static int
948 iwm_alloc_fwmem(struct iwm_softc *sc)
949 {
950         /* Must be aligned on a 16-byte boundary. */
951         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
952             sc->sc_fwdmasegsz, 16);
953 }
954
955 /* tx scheduler rings.  not used? */
956 static int
957 iwm_alloc_sched(struct iwm_softc *sc)
958 {
959         /* TX scheduler rings must be aligned on a 1KB boundary. */
960         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
961             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
962 }
963
964 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
965 static int
966 iwm_alloc_kw(struct iwm_softc *sc)
967 {
968         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
969 }
970
971 /* interrupt cause table */
972 static int
973 iwm_alloc_ict(struct iwm_softc *sc)
974 {
975         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
976             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
977 }
978
/*
 * Allocate all DMA resources for one RX ring: the descriptor array,
 * the status area, the per-buffer DMA tag/maps, and the initial RX
 * buffers themselves.  On any failure the partially built ring is
 * torn down with iwm_free_rx_ring() before returning the error.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned).  Each descriptor
	 * is a 32-bit word holding a (shifted) receive buffer address.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Populate slot i with a buffer and its descriptor. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1059
/*
 * Ask the device to stop the RX DMA engine.  Requires NIC access; if
 * the NIC lock cannot be taken, nothing is done.
 * XXX conditional nic locks are stupid
 * XXX print out if we can't lock the NIC?
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return;

	/* XXX handle if RX stop doesn't finish? */
	(void) iwm_pcie_rx_stop(sc);
	iwm_nic_unlock(sc);
}
1071
1072 static void
1073 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1074 {
1075         /* Reset the ring state */
1076         ring->cur = 0;
1077
1078         /*
1079          * The hw rx ring index in shared memory must also be cleared,
1080          * otherwise the discrepancy can cause reprocessing chaos.
1081          */
1082         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1083 }
1084
1085 static void
1086 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1087 {
1088         int i;
1089
1090         iwm_dma_contig_free(&ring->desc_dma);
1091         iwm_dma_contig_free(&ring->stat_dma);
1092
1093         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1094                 struct iwm_rx_data *data = &ring->data[i];
1095
1096                 if (data->m != NULL) {
1097                         bus_dmamap_sync(ring->data_dmat, data->map,
1098                             BUS_DMASYNC_POSTREAD);
1099                         bus_dmamap_unload(ring->data_dmat, data->map);
1100                         m_freem(data->m);
1101                         data->m = NULL;
1102                 }
1103                 if (data->map != NULL) {
1104                         bus_dmamap_destroy(ring->data_dmat, data->map);
1105                         data->map = NULL;
1106                 }
1107         }
1108         if (ring->spare_map != NULL) {
1109                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1110                 ring->spare_map = NULL;
1111         }
1112         if (ring->data_dmat != NULL) {
1113                 bus_dma_tag_destroy(ring->data_dmat);
1114                 ring->data_dmat = NULL;
1115         }
1116 }
1117
/*
 * Allocate DMA resources for one TX ring: descriptors for every ring,
 * plus command buffers and per-slot DMA maps for the rings that carry
 * traffic (qid <= IWM_MVM_CMD_QUEUE).  On failure the partially built
 * ring is torn down with iwm_free_tx_ring() before the error returns.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	/* One device command slot per TX descriptor. */
	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute, for each slot, the bus address of its command slot
	 * and of the scratch area inside the TX command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* After the loop, paddr must sit exactly at the end of cmd space. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1207
1208 static void
1209 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1210 {
1211         int i;
1212
1213         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1214                 struct iwm_tx_data *data = &ring->data[i];
1215
1216                 if (data->m != NULL) {
1217                         bus_dmamap_sync(ring->data_dmat, data->map,
1218                             BUS_DMASYNC_POSTWRITE);
1219                         bus_dmamap_unload(ring->data_dmat, data->map);
1220                         m_freem(data->m);
1221                         data->m = NULL;
1222                 }
1223         }
1224         /* Clear TX descriptors. */
1225         memset(ring->desc, 0, ring->desc_dma.size);
1226         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1227             BUS_DMASYNC_PREWRITE);
1228         sc->qfullmsk &= ~(1 << ring->qid);
1229         ring->queued = 0;
1230         ring->cur = 0;
1231 }
1232
1233 static void
1234 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1235 {
1236         int i;
1237
1238         iwm_dma_contig_free(&ring->desc_dma);
1239         iwm_dma_contig_free(&ring->cmd_dma);
1240
1241         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1242                 struct iwm_tx_data *data = &ring->data[i];
1243
1244                 if (data->m != NULL) {
1245                         bus_dmamap_sync(ring->data_dmat, data->map,
1246                             BUS_DMASYNC_POSTWRITE);
1247                         bus_dmamap_unload(ring->data_dmat, data->map);
1248                         m_freem(data->m);
1249                         data->m = NULL;
1250                 }
1251                 if (data->map != NULL) {
1252                         bus_dmamap_destroy(ring->data_dmat, data->map);
1253                         data->map = NULL;
1254                 }
1255         }
1256         if (ring->data_dmat != NULL) {
1257                 bus_dma_tag_destroy(ring->data_dmat);
1258                 ring->data_dmat = NULL;
1259         }
1260 }
1261
1262 /*
1263  * High-level hardware frobbing routines
1264  */
1265
/*
 * Unmask the full interrupt set.  The mask is cached in sc_intmask so
 * iwm_restore_interrupts() can re-program the same set later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1272
/* Re-program the interrupt mask last cached in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1278
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts (both general and flow-handler) */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1289
/*
 * Reset and re-arm the interrupt cause table (ICT): clear the table,
 * point the device at it, switch the driver to ICT mode, and re-enable
 * interrupts.  Interrupts are masked for the duration of the update.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts (ack anything pended while masked first). */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1313
1314 /*
1315  * Since this .. hard-resets things, it's time to actually
1316  * mark the first vap (if any) as having no mac context.
1317  * It's annoying, but since the driver is potentially being
1318  * stop/start'ed whilst active (thanks openbsd port!) we
1319  * have to correctly track this.
1320  */
1321 static void
1322 iwm_stop_device(struct iwm_softc *sc)
1323 {
1324         struct ieee80211com *ic = &sc->sc_ic;
1325         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1326         int chnl, qid;
1327         uint32_t mask = 0;
1328
1329         /* tell the device to stop sending interrupts */
1330         iwm_disable_interrupts(sc);
1331
1332         /*
1333          * FreeBSD-local: mark the first vap as not-uploaded,
1334          * so the next transition through auth/assoc
1335          * will correctly populate the MAC context.
1336          */
1337         if (vap) {
1338                 struct iwm_vap *iv = IWM_VAP(vap);
1339                 iv->is_uploaded = 0;
1340         }
1341
1342         /* device going down, Stop using ICT table */
1343         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1344
1345         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1346
1347         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1348
1349         if (iwm_nic_lock(sc)) {
1350                 /* Stop each Tx DMA channel */
1351                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1352                         IWM_WRITE(sc,
1353                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1354                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1355                 }
1356
1357                 /* Wait for DMA channels to be idle */
1358                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1359                     5000)) {
1360                         device_printf(sc->sc_dev,
1361                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1362                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1363                 }
1364                 iwm_nic_unlock(sc);
1365         }
1366         iwm_disable_rx_dma(sc);
1367
1368         /* Stop RX ring. */
1369         iwm_reset_rx_ring(sc, &sc->rxq);
1370
1371         /* Reset all TX rings. */
1372         for (qid = 0; qid < nitems(sc->txq); qid++)
1373                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1374
1375         /*
1376          * Power-down device's busmaster DMA clocks
1377          */
1378         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1379         DELAY(5);
1380
1381         /* Make sure (redundant) we've released our request to stay awake */
1382         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1383             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1384
1385         /* Stop the device, and put it in low power state */
1386         iwm_apm_stop(sc);
1387
1388         /* stop and reset the on-board processor */
1389         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1390         DELAY(1000);
1391
1392         /*
1393          * Upon stop, the APM issues an interrupt if HW RF kill is set.
1394          * This is a bug in certain verions of the hardware.
1395          * Certain devices also keep sending HW RF kill interrupt all
1396          * the time, unless the interrupt is ACKed even if the interrupt
1397          * should be masked. Re-ACK all the interrupts here.
1398          */
1399         iwm_disable_interrupts(sc);
1400
1401         /*
1402          * Even if we stop the HW, we still want the RF kill
1403          * interrupt
1404          */
1405         iwm_enable_rfkill_int(sc);
1406         iwm_check_rfkill(sc);
1407 }
1408
/*
 * Program the hardware interface configuration register from the
 * radio configuration the firmware advertised (sc_fw_phy_config) and
 * the hardware revision, then apply the 7000-family PCIe power-off
 * workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio type/step/dash fields from the PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1450
/*
 * Program the hardware RX engine: clear the status area, stop RX DMA,
 * point the device at the descriptor ring and status area, enable the
 * RX channel, set interrupt coalescing, and prime the write pointer.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1510
/*
 * Program the TX DMA engine: deactivate the scheduler, set the
 * "keep warm" page address, and load the base address of every TX
 * descriptor ring.  Returns EBUSY if the NIC cannot be locked,
 * 0 on success.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        /* Let the scheduler auto-activate queues. */
        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1545
/*
 * Bring the NIC up: APM init, family-specific power setup, device
 * configuration, then RX and TX DMA initialization.  Returns 0 on
 * success or the error from the failing RX/TX init step.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        /* Only family 7000 needs the explicit power configuration here. */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        /* 0x800fffff: shadow-register enable mask, taken as-is from iwlwifi. */
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1572
/*
 * Access-category to firmware TX FIFO mapping, indexed by AC.
 * NOTE(review): the ordering (VO, VI, BE, BK) follows Linux/iwlwifi AC
 * numbering rather than net80211's WME_AC_* order — confirm against
 * the call sites.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};
1579
/*
 * Activate TX queue 'qid' and attach it to firmware FIFO 'fifo'.
 *
 * The command queue is configured directly through the scheduler
 * registers; any other queue is configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware.  Returns 0 on success,
 * EBUSY if the NIC lock cannot be taken, or the command error.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                /* No aggregation on the command queue. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

                iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                /* Mark the queue active and bind it to its FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                /* Drop the NIC lock while sleeping on the sync command. */
                iwm_nic_unlock(sc);

                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the queue *number* into IWM_SCD_EN_CTRL;
         * a per-queue enable bitmask would normally use (1 << qid) —
         * verify against the iwlwifi scheduler register definitions.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1654
/*
 * Finish initialization after the firmware's ALIVE notification:
 * cross-check the scheduler SRAM base, reset the ICT table, clear the
 * TX scheduler context in SRAM, enable the command queue and all TX
 * DMA channels.  Returns 0 on success, EBUSY if the NIC lock cannot
 * be taken, or the error from a memory write / queue enable.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
        int nwords;
        int error, chnl;
        uint32_t base;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Compare the base the firmware reported against the PRPH value. */
        base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (sc->sched_base != base) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->sched_base, base);
        }

        iwm_ict_reset(sc);

        /* Clear TX scheduler state in SRAM. */
        nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
            / sizeof(uint32_t);
        error = iwm_write_mem(sc,
            sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, nwords);
        if (error)
                goto out;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        /* Must drop the lock: iwm_enable_txq() acquires it itself. */
        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX FIFOs in the scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        /* Enable L1-Active */
        if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

 out:
        iwm_nic_unlock(sc);
        return error;
}
1721
/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/*
 * List of NVM sections we are allowed/need to read.  Sections a given
 * device family does not provide simply fail to read and are skipped
 * by iwm_nvm_init().
 */
const int nvm_to_read[] = {
        IWM_NVM_SECTION_TYPE_HW,
        IWM_NVM_SECTION_TYPE_SW,
        IWM_NVM_SECTION_TYPE_REGULATORY,
        IWM_NVM_SECTION_TYPE_CALIBRATION,
        IWM_NVM_SECTION_TYPE_PRODUCTION,
        IWM_NVM_SECTION_TYPE_HW_8000,
        IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
        IWM_NVM_SECTION_TYPE_PHY_SKU,
};
1739
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
/* Upper bound for a single NVM section (size of the read buffer). */
#define IWM_MAX_NVM_SECTION_SIZE        8192

/* Opcodes for the NVM access command. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
#define IWM_READ_NVM_CHUNK_SUCCEED              0
#define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1750
1751 static int
1752 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1753         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1754 {
1755         offset = 0;
1756         struct iwm_nvm_access_cmd nvm_access_cmd = {
1757                 .offset = htole16(offset),
1758                 .length = htole16(length),
1759                 .type = htole16(section),
1760                 .op_code = IWM_NVM_READ_OPCODE,
1761         };
1762         struct iwm_nvm_access_resp *nvm_resp;
1763         struct iwm_rx_packet *pkt;
1764         struct iwm_host_cmd cmd = {
1765                 .id = IWM_NVM_ACCESS_CMD,
1766                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1767                     IWM_CMD_SEND_IN_RFKILL,
1768                 .data = { &nvm_access_cmd, },
1769         };
1770         int ret, offset_read;
1771         size_t bytes_read;
1772         uint8_t *resp_data;
1773
1774         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1775
1776         ret = iwm_send_cmd(sc, &cmd);
1777         if (ret) {
1778                 device_printf(sc->sc_dev,
1779                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1780                 return ret;
1781         }
1782
1783         pkt = cmd.resp_pkt;
1784         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1785                 device_printf(sc->sc_dev,
1786                     "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1787                     pkt->hdr.flags);
1788                 ret = EIO;
1789                 goto exit;
1790         }
1791
1792         /* Extract NVM response */
1793         nvm_resp = (void *)pkt->data;
1794
1795         ret = le16toh(nvm_resp->status);
1796         bytes_read = le16toh(nvm_resp->length);
1797         offset_read = le16toh(nvm_resp->offset);
1798         resp_data = nvm_resp->data;
1799         if (ret) {
1800                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1801                     "NVM access command failed with status %d\n", ret);
1802                 ret = EINVAL;
1803                 goto exit;
1804         }
1805
1806         if (offset_read != offset) {
1807                 device_printf(sc->sc_dev,
1808                     "NVM ACCESS response with invalid offset %d\n",
1809                     offset_read);
1810                 ret = EINVAL;
1811                 goto exit;
1812         }
1813
1814         if (bytes_read > length) {
1815                 device_printf(sc->sc_dev,
1816                     "NVM ACCESS response with too much data "
1817                     "(%d bytes requested, %zd bytes received)\n",
1818                     length, bytes_read);
1819                 ret = EINVAL;
1820                 goto exit;
1821         }
1822
1823         memcpy(data + offset, resp_data, bytes_read);
1824         *len = bytes_read;
1825
1826  exit:
1827         iwm_free_resp(sc, &cmd);
1828         return ret;
1829 }
1830
1831 /*
1832  * Reads an NVM section completely.
1833  * NICs prior to 7000 family don't have a real NVM, but just read
1834  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1835  * by uCode, we need to manually check in this case that we don't
1836  * overflow and try to read more than the EEPROM size.
1837  * For 7000 family NICs, we supply the maximal size we can read, and
1838  * the uCode fills the response with as much data as we can,
1839  * without overflowing, so no check is needed.
1840  */
1841 static int
1842 iwm_nvm_read_section(struct iwm_softc *sc,
1843         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1844 {
1845         uint16_t chunklen, seglen;
1846         int error = 0;
1847
1848         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1849             "reading NVM section %d\n", section);
1850
1851         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1852         *len = 0;
1853
1854         /* Read NVM chunks until exhausted (reading less than requested) */
1855         while (seglen == chunklen && *len < max_len) {
1856                 error = iwm_nvm_read_chunk(sc,
1857                     section, *len, chunklen, data, &seglen);
1858                 if (error) {
1859                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1860                             "Cannot read from NVM section "
1861                             "%d at offset %d\n", section, *len);
1862                         return error;
1863                 }
1864                 *len += seglen;
1865         }
1866
1867         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1868             "NVM section %d read completed (%d bytes, error=%d)\n",
1869             section, *len, error);
1870         return error;
1871 }
1872
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        /* First channel-flags word, relative to the SW section start. */
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1890
/* Family 8000 NVM offsets (in words); layouts differ from pre-8000. */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        /* Read from the PHY_SKU section, see iwm_get_radio_cfg(). */
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1916
/* SKU Capabilities (actual values from NVM definition) */
/* Bits of the SKU word returned by iwm_get_sku(). */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1924
/* radio config bits (actual values from NVM definition) */
/* Field extractors for the pre-8000 radio configuration word. */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Field extractors for the family 8000 radio configuration word. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* NOTE(review): units presumed dBm — confirm at the consumers. */
#define DEFAULT_MAX_TX_POWER 16
1941
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 */
/* Per-channel flag words read straight from the NVM channel list. */
enum iwm_nvm_channel_flags {
        IWM_NVM_CHANNEL_VALID = (1 << 0),
        IWM_NVM_CHANNEL_IBSS = (1 << 1),
        IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
        IWM_NVM_CHANNEL_RADAR = (1 << 4),
        IWM_NVM_CHANNEL_DFS = (1 << 7),
        IWM_NVM_CHANNEL_WIDE = (1 << 8),
        IWM_NVM_CHANNEL_40MHZ = (1 << 9),
        IWM_NVM_CHANNEL_80MHZ = (1 << 10),
        IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1966
1967 /*
1968  * Translate EEPROM flags to net80211.
1969  */
1970 static uint32_t
1971 iwm_eeprom_channel_flags(uint16_t ch_flags)
1972 {
1973         uint32_t nflags;
1974
1975         nflags = 0;
1976         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1977                 nflags |= IEEE80211_CHAN_PASSIVE;
1978         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1979                 nflags |= IEEE80211_CHAN_NOADHOC;
1980         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1981                 nflags |= IEEE80211_CHAN_DFS;
1982                 /* Just in case. */
1983                 nflags |= IEEE80211_CHAN_NOADHOC;
1984         }
1985
1986         return (nflags);
1987 }
1988
/*
 * Append NVM channels [ch_idx, ch_num) to 'chans', translating their
 * NVM flags to net80211 flags.  Channels lacking the VALID flag are
 * skipped; stops early when ieee80211_add_channel() reports an error
 * (e.g. the channel array is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* The two device families use different channel tables. */
                if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
2029
/*
 * net80211 callback: build the channel list from the parsed NVM data.
 * Channels 1-13 are added as 11b/g, channel 14 as 11b only, and the
 * 5GHz channels as 11a when the SKU enables the 5GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = &sc->sc_nvm;
        uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* The channel table length differs per device family. */
                if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2062
/*
 * Determine the MAC address on family 8000 hardware.  Preference
 * order: (1) the MAC_OVERRIDE NVM section, unless it holds the
 * reserved, broadcast, multicast, or an otherwise invalid address;
 * (2) the OTP address read through the WFMP PRPH registers.  If both
 * fail the address is zeroed.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* Bytes are stored reversed within each register. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2121
2122 static int
2123 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2124             const uint16_t *phy_sku)
2125 {
2126         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_SKU);
2128
2129         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2130 }
2131
2132 static int
2133 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2134 {
2135         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2136                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2137         else
2138                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2139                                                 IWM_NVM_VERSION_8000));
2140 }
2141
2142 static int
2143 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2144                   const uint16_t *phy_sku)
2145 {
2146         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2147                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2148
2149         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2150 }
2151
2152 static int
2153 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2154 {
2155         int n_hw_addr;
2156
2157         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2158                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2159
2160         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2161
2162         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2163 }
2164
/*
 * Decode the radio configuration word into its component fields
 * (type/step/dash/pnum; for family 8000 also the valid TX/RX antenna
 * masks).
 */
static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
                  uint32_t radio_cfg)
{
        if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
                data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
                data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
                data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
                data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
                return;
        }

        /* set the radio configuration for family 8000 */
        data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
        data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
        data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
        /* NOTE(review): pnum is filled from the FLAVOR field here —
         * confirm this is intended (it mirrors upstream). */
        data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
        data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
        data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}
2185
/*
 * Populate sc->sc_nvm from the raw NVM section contents.  All
 * multi-byte NVM fields are little-endian.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data = &sc->sc_nvm;
        uint8_t hw_addr[IEEE80211_ADDR_LEN];
        uint32_t sku, radio_cfg;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n is forced off here regardless of the SKU capability bit. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        /* The byte order is little endian 16 bit, meaning 214365 */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
                IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
        }

        /* Channel flags live in different sections per device family. */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }
        data->calib_version = 255;   /* TODO:
                                        this value will prevent some checks from
                                        failing, we need to check if this
                                        field is still needed, and if it does,
                                        where is it in the NVM */

        return 0;
}
2236
/*
 * Verify that the mandatory NVM sections for this device family are
 * present, then hand the raw section pointers to iwm_parse_nvm_data().
 * Returns ENOENT when a required section is missing; panics on an
 * unknown device family.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
        const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

        /* Checking for required sections */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return ENOENT;
                }

                hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
        } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
                /* SW and REGULATORY sections are mandatory */
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return ENOENT;
                }
                /* MAC_OVERRIDE or at least HW section must exist */
                if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
                    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse mac_address, empty sections\n");
                        return ENOENT;
                }

                /* PHY_SKU section is mandatory in B0 */
                if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse phy_sku in B0, empty sections\n");
                        return ENOENT;
                }

                hw = (const uint16_t *)
                    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
        } else {
                panic("unknown device family %d\n", sc->sc_device_family);
        }

        sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
        /* Optional sections: these may be NULL if they failed to read. */
        calib = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
        regulatory = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
        mac_override = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

        return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
            phy_sku, regulatory);
}
2293
2294 static int
2295 iwm_nvm_init(struct iwm_softc *sc)
2296 {
2297         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2298         int i, section, error;
2299         uint16_t len;
2300         uint8_t *buf;
2301         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2302
2303         memset(nvm_sections, 0 , sizeof(nvm_sections));
2304
2305         buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT);
2306         if (buf == NULL)
2307                 return ENOMEM;
2308
2309         for (i = 0; i < nitems(nvm_to_read); i++) {
2310                 section = nvm_to_read[i];
2311                 KKASSERT(section <= nitems(nvm_sections));
2312
2313                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2314                 if (error) {
2315                         error = 0;
2316                         continue;
2317                 }
2318                 nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT);
2319                 if (nvm_sections[section].data == NULL) {
2320                         error = ENOMEM;
2321                         break;
2322                 }
2323                 memcpy(nvm_sections[section].data, buf, len);
2324                 nvm_sections[section].length = len;
2325         }
2326         kfree(buf, M_DEVBUF);
2327         if (error == 0)
2328                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2329
2330         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2331                 if (nvm_sections[i].data != NULL)
2332                         kfree(nvm_sections[i].data, M_DEVBUF);
2333         }
2334
2335         return error;
2336 }
2337
2338 /*
2339  * Firmware loading gunk.  This is kind of a weird hybrid between the
2340  * iwn driver and the Linux iwlwifi driver.
2341  */
2342
2343 static int
2344 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2345         const uint8_t *section, uint32_t byte_cnt)
2346 {
2347         int error = EINVAL;
2348         uint32_t chunk_sz, offset;
2349
2350         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2351
2352         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2353                 uint32_t addr, len;
2354                 const uint8_t *data;
2355
2356                 addr = dst_addr + offset;
2357                 len = MIN(chunk_sz, byte_cnt - offset);
2358                 data = section + offset;
2359
2360                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2361                 if (error)
2362                         break;
2363         }
2364
2365         return error;
2366 }
2367
/*
 * DMA one firmware chunk to the device via the FH service channel and
 * wait (up to 1s) for the interrupt handler to flag completion through
 * sc_fw_chunk_done.  Returns 0, EBUSY if the NIC lock cannot be taken,
 * or the sleep error.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/*
	 * Destinations in the extended SRAM window need the extended
	 * address space bit set for the duration of the transfer.
	 */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	/* Cleared here; set by the interrupt path when the DMA finishes. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel, program source/destination, then enable. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	/* Undo the extended address space bit set above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		iwm_nic_unlock(sc);
	}

	return error;
}
2437
/*
 * Load all firmware sections that belong to one CPU of an 8000-family
 * device, starting at *first_ucode_section, and report each loaded
 * section to the ucode via IWM_FH_UCODE_LOAD_STATUS.  On return,
 * *first_ucode_section is left at the separator so the caller can
 * continue with the next CPU.  Returns 0 or an errno.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, error = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU1 status lives in the low 16 bits, CPU2 in the high 16. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
		    i, offset, dlen, cpu);

		/* Sections larger than one firmware DMA segment are fatal. */
		if (dlen > sc->sc_fwdmasegsz) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "chunk %d too large (%d bytes)\n", i, dlen);
			error = EFBIG;
		} else {
			error = iwm_firmware_load_sect(sc, offset, data, dlen);
		}
		if (error) {
			device_printf(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, error);
			return error;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's half (or the whole register for CPU2) complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2518
2519 int
2520 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2521 {
2522         struct iwm_fw_sects *fws;
2523         int error = 0;
2524         int first_ucode_section;
2525
2526         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2527             ucode_type);
2528
2529         fws = &sc->sc_fw.fw_sects[ucode_type];
2530
2531         /* configure the ucode to be ready to get the secured image */
2532         /* release CPU reset */
2533         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2534
2535         /* load to FW the binary Secured sections of CPU1 */
2536         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2537         if (error)
2538                 return error;
2539
2540         /* load to FW the binary sections of CPU2 */
2541         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2542 }
2543
2544 static int
2545 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2546 {
2547         struct iwm_fw_sects *fws;
2548         int error, i;
2549         const void *data;
2550         uint32_t dlen;
2551         uint32_t offset;
2552
2553         sc->sc_uc.uc_intr = 0;
2554
2555         fws = &sc->sc_fw.fw_sects[ucode_type];
2556         for (i = 0; i < fws->fw_count; i++) {
2557                 data = fws->fw_sect[i].fws_data;
2558                 dlen = fws->fw_sect[i].fws_len;
2559                 offset = fws->fw_sect[i].fws_devoff;
2560                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2561                     "LOAD FIRMWARE type %d offset %u len %d\n",
2562                     ucode_type, offset, dlen);
2563                 if (dlen > sc->sc_fwdmasegsz) {
2564                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2565                             "chunk %d too large (%d bytes)\n", i, dlen);
2566                         error = EFBIG;
2567                 } else {
2568                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2569                 }
2570                 if (error) {
2571                         device_printf(sc->sc_dev,
2572                             "could not load firmware chunk %u of %u "
2573                             "(error=%d)\n", i, fws->fw_count, error);
2574                         return error;
2575                 }
2576         }
2577
2578         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2579
2580         return 0;
2581 }
2582
/*
 * Dispatch to the family-specific loader, then wait (up to ~1s total)
 * for the "alive" interrupt (sc_uc.uc_intr/uc_ok set by the interrupt
 * path).  Returns 0 or the load/sleep error.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error, w;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		error = iwm_load_firmware_8000(sc, ucode_type);
	else
		error = iwm_load_firmware_7000(sc, ucode_type);
	if (error)
		return error;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}
	if (error || !sc->sc_uc.uc_ok) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		/* On 8000 parts, dump per-CPU status to aid diagnosis. */
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
			device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
	}

	/*
	 * Give the firmware some time to initialize.
	 * Accessing it too early causes errors.
	 *
	 * NOTE(review): unlike the sleeps above, this lksleep() is not
	 * guarded by #if defined(__DragonFly__) — confirm this is
	 * intentional if the file is ever shared with FreeBSD again.
	 */
	lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);

	return error;
}
2621
2622 static int
2623 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2624 {
2625         int error;
2626
2627         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2628
2629         if ((error = iwm_nic_init(sc)) != 0) {
2630                 device_printf(sc->sc_dev, "unable to init nic\n");
2631                 return error;
2632         }
2633
2634         /* make sure rfkill handshake bits are cleared */
2635         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2636         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2637             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2638
2639         /* clear (again), then enable host interrupts */
2640         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2641         iwm_enable_interrupts(sc);
2642
2643         /* really make sure rfkill handshake bits are cleared */
2644         /* maybe we should write a few times more?  just to make sure */
2645         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2646         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2647
2648         /* Load the given image to the HW */
2649         return iwm_load_firmware(sc, ucode_type);
2650 }
2651
2652 static int
2653 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2654 {
2655         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2656                 .valid = htole32(valid_tx_ant),
2657         };
2658
2659         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2660             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2661 }
2662
2663 static int
2664 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2665 {
2666         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2667         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2668
2669         /* Set parameters */
2670         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2671         phy_cfg_cmd.calib_control.event_trigger =
2672             sc->sc_default_calib[ucode_type].event_trigger;
2673         phy_cfg_cmd.calib_control.flow_trigger =
2674             sc->sc_default_calib[ucode_type].flow_trigger;
2675
2676         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2677             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2678         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2679             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2680 }
2681
2682 static int
2683 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2684         enum iwm_ucode_type ucode_type)
2685 {
2686         enum iwm_ucode_type old_type = sc->sc_uc_current;
2687         int error;
2688
2689         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2690                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2691                         error);
2692                 return error;
2693         }
2694
2695         sc->sc_uc_current = ucode_type;
2696         error = iwm_start_fw(sc, ucode_type);
2697         if (error) {
2698                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2699                 sc->sc_uc_current = old_type;
2700                 return error;
2701         }
2702
2703         error = iwm_post_alive(sc);
2704         if (error) {
2705                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2706         }
2707         return error;
2708 }
2709
2710 /*
2711  * mvm misc bits
2712  */
2713
/*
 * Run the INIT ucode image.  With justnvm set, only bring the firmware
 * up far enough to read the NVM (and derive the MAC address); otherwise
 * also configure BT coex, Smart FIFO, TX antennas, and kick off the
 * internal calibrations, then wait for the init-complete notification.
 * Returns 0 or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set by the interrupt path on init-complete. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		/* NVM only: read it and publish the MAC address. */
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		return 0;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", error);
		return error;
	}

	/* Init Smart FIFO. */
	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (error != 0)
		return error;

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
				 0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
				 0, "iwminit", 2*hz);
#endif
		if (error) {
			device_printf(sc->sc_dev, "init complete failed: %d\n",
				sc->sc_init_complete);
			break;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
	    sc->sc_init_complete ? "" : "not ");

	return error;
}
2796
2797 /*
2798  * receive side
2799  */
2800
2801 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * (Re)stock one RX ring slot: allocate a jumbo cluster mbuf, DMA-map it
 * via the ring's spare map, swap the spare map with the slot's map, and
 * write the 256-byte-aligned DMA address (>> 8) into the RX descriptor.
 * Called at init time and from the RX path.  Returns 0 or an errno.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/* Load into the spare map first so the old mapping stays valid
	 * if the load fails. */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware requires 256-byte alignment of the RX buffer. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
2852
2853 #define IWM_RSSI_OFFSET 50
2854 static int
2855 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2856 {
2857         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2858         uint32_t agc_a, agc_b;
2859         uint32_t val;
2860
2861         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2862         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2863         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2864
2865         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2866         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2867         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2868
2869         /*
2870          * dBm = rssi dB - agc dB - constant.
2871          * Higher AGC (higher radio gain) means lower signal.
2872          */
2873         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2874         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2875         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2876
2877         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2878             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2879             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2880
2881         return max_rssi_dbm;
2882 }
2883
2884 /*
2885  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2886  * values are reported by the fw as positive values - need to negate
2887  * to obtain their dBM.  Account for missing antennas by replacing 0
2888  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2889  */
2890 static int
2891 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2892 {
2893         int energy_a, energy_b, energy_c, max_energy;
2894         uint32_t val;
2895
2896         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2897         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2898             IWM_RX_INFO_ENERGY_ANT_A_POS;
2899         energy_a = energy_a ? -energy_a : -256;
2900         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2901             IWM_RX_INFO_ENERGY_ANT_B_POS;
2902         energy_b = energy_b ? -energy_b : -256;
2903         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2904             IWM_RX_INFO_ENERGY_ANT_C_POS;
2905         energy_c = energy_c ? -energy_c : -256;
2906         max_energy = MAX(energy_a, energy_b);
2907         max_energy = MAX(max_energy, energy_c);
2908
2909         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2910             "energy In A %d B %d C %d , and max %d\n",
2911             energy_a, energy_b, energy_c, max_energy);
2912
2913         return max_energy;
2914 }
2915
2916 static void
2917 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2918         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2919 {
2920         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2921
2922         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2923         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2924
2925         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2926 }
2927
2928 /*
2929  * Retrieve the average noise (in dBm) among receivers.
2930  */
2931 static int
2932 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2933 {
2934         int i, total, nbant, noise;
2935
2936         total = nbant = noise = 0;
2937         for (i = 0; i < 3; i++) {
2938                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2939                 if (noise) {
2940                         total += noise;
2941                         nbant++;
2942                 }
2943         }
2944
2945         /* There should be at least one antenna but check anyway. */
2946         return (nbant == 0) ? -127 : (total / nbant) - 107;
2947 }
2948
2949 /*
2950  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2951  *
2952  * Handles the actual data of the Rx packet from the fw
2953  */
2954 static void
2955 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2956         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2957 {
2958         struct ieee80211com *ic = &sc->sc_ic;
2959         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2960         struct ieee80211_frame *wh;
2961         struct ieee80211_node *ni;
2962         struct ieee80211_rx_stats rxs;
2963         struct mbuf *m;
2964         struct iwm_rx_phy_info *phy_info;
2965         struct iwm_rx_mpdu_res_start *rx_res;
2966         uint32_t len;
2967         uint32_t rx_pkt_status;
2968         int rssi;
2969
2970         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2971
2972         phy_info = &sc->sc_last_phy_info;
2973         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2974         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2975         len = le16toh(rx_res->byte_count);
2976         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2977
2978         m = data->m;
2979         m->m_data = pkt->data + sizeof(*rx_res);
2980         m->m_pkthdr.len = m->m_len = len;
2981
2982         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2983                 device_printf(sc->sc_dev,
2984                     "dsp size out of range [0,20]: %d\n",
2985                     phy_info->cfg_phy_cnt);
2986                 return;
2987         }
2988
2989         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2990             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2991                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2992                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2993                 return; /* drop */
2994         }
2995
2996         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2997                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2998         } else {
2999                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3000         }
3001         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
3002         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
3003
3004         /* replenish ring for the buffer we're going to feed to the sharks */
3005         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3006                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3007                     __func__);
3008                 return;
3009         }
3010
3011         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3012
3013         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3014             "%s: phy_info: channel=%d, flags=0x%08x\n",
3015             __func__,
3016             le16toh(phy_info->channel),
3017             le16toh(phy_info->phy_flags));
3018
3019         /*
3020          * Populate an RX state struct with the provided information.
3021          */
3022         bzero(&rxs, sizeof(rxs));
3023         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3024         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3025         rxs.c_ieee = le16toh(phy_info->channel);
3026         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3027                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3028         } else {
3029                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3030         }
3031         rxs.rssi = rssi - sc->sc_noise;
3032         rxs.nf = sc->sc_noise;
3033
3034         if (ieee80211_radiotap_active_vap(vap)) {
3035                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3036
3037                 tap->wr_flags = 0;
3038                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3039                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3040                 tap->wr_chan_freq = htole16(rxs.c_freq);
3041                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3042                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3043                 tap->wr_dbm_antsignal = (int8_t)rssi;
3044                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3045                 tap->wr_tsft = phy_info->system_timestamp;
3046                 switch (phy_info->rate) {
3047                 /* CCK rates. */
3048                 case  10: tap->wr_rate =   2; break;
3049                 case  20: tap->wr_rate =   4; break;
3050                 case  55: tap->wr_rate =  11; break;
3051                 case 110: tap->wr_rate =  22; break;
3052                 /* OFDM rates. */
3053                 case 0xd: tap->wr_rate =  12; break;
3054                 case 0xf: tap->wr_rate =  18; break;
3055                 case 0x5: tap->wr_rate =  24; break;
3056                 case 0x7: tap->wr_rate =  36; break;
3057                 case 0x9: tap->wr_rate =  48; break;
3058                 case 0xb: tap->wr_rate =  72; break;
3059                 case 0x1: tap->wr_rate =  96; break;
3060                 case 0x3: tap->wr_rate = 108; break;
3061                 /* Unknown rate: should not happen. */
3062                 default:  tap->wr_rate =   0;
3063                 }
3064         }
3065
3066         IWM_UNLOCK(sc);
3067         if (ni != NULL) {
3068                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3069                 ieee80211_input_mimo(ni, m, &rxs);
3070                 ieee80211_free_node(ni);
3071         } else {
3072                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3073                 ieee80211_input_mimo_all(ic, m, &rxs);
3074         }
3075         IWM_LOCK(sc);
3076 }
3077
/*
 * Handle the TX response for a single (non-aggregated) frame: feed the
 * ACK-failure count into net80211 rate control and report whether the
 * frame ultimately failed.
 *
 * Returns 1 on TX failure, 0 on success.
 */
static int
iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
	struct iwm_node *in)
{
	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211vap *vap = ni->ni_vap;
	/* Mask off everything but the firmware's TX status code. */
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
	/* Number of retries that did not see an ACK; fed to rate control. */
	int failack = tx_resp->failure_frame;

	/* This path only handles single-frame responses (no aggregation). */
	KASSERT(tx_resp->frame_count == 1, ("too many frames"));

	/* Update rate control statistics. */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
	    __func__,
	    (int) le16toh(tx_resp->status.status),
	    (int) le16toh(tx_resp->status.sequence),
	    tx_resp->frame_count,
	    tx_resp->bt_kill_count,
	    tx_resp->failure_rts,
	    tx_resp->failure_frame,
	    le32toh(tx_resp->initial_rate),
	    (int) le16toh(tx_resp->wireless_media_time));

	if (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE) {
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
		return (1);
	} else {
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
		return (0);
	}
}
3113
/*
 * Process a TX command completion notification from the firmware: find
 * the tx slot named by the (qid, idx) in the packet header, update rate
 * control statistics, unmap and free the transmitted mbuf, and hand the
 * completion status to net80211.  If this drains the ring below the low
 * watermark, transmit is restarted.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	/* A completion for an idle or empty slot indicates driver confusion. */
	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Firmware is making progress; reset the watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Release the slot before completing, so it can be reused. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
3161
3162 /*
3163  * transmit side
3164  */
3165
3166 /*
3167  * Process a "command done" firmware notification.  This is where we wakeup
3168  * processes waiting for a synchronous command completion.
3169  * from if_iwn
3170  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/*
	 * Wake any thread sleeping on this descriptor slot — presumably
	 * the synchronous command submission path; confirm against the
	 * matching tsleep/msleep caller.
	 */
	wakeup(&ring->desc[pkt->hdr.idx]);
}
3193
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * (Disabled.)  Write the byte count for the frame at (qid, idx) into the
 * TX scheduler byte-count table so the firmware scheduler knows the
 * frame length.  Currently unused; compiled out pending block-ack support.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Table entry packs the station id (high 4 bits) with the length. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* NOTE(review): duplicates the entry past the ring end for low
	 * indices — looks like a firmware wrap-around requirement; confirm
	 * against iwlwifi's scd_bc_tbl handling. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3226
3227 /*
3228  * Take an 802.11 (non-n) rate, find the relevant rate
3229  * table entry.  return the index into in_ridx[].
3230  *
3231  * The caller then uses that index back into in_ridx
3232  * to figure out the rate index programmed /into/
3233  * the firmware for this given node.
3234  */
3235 static int
3236 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3237     uint8_t rate)
3238 {
3239         int i;
3240         uint8_t r;
3241
3242         for (i = 0; i < nitems(in->in_ridx); i++) {
3243                 r = iwm_rates[in->in_ridx[i]].rate;
3244                 if (rate == r)
3245                         return (i);
3246         }
3247         /* XXX Return the first */
3248         /* XXX TODO: have it return the /lowest/ */
3249         return (0);
3250 }
3251
3252 /*
3253  * Fill in the rate related information for a transmit command.
3254  */
/*
 * Fill in the rate related fields (retry limits, initial rate index and
 * rate_n_flags) of the TX command for a frame, and return the rate table
 * entry that was chosen so the caller can report it (e.g. to radiotap).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		/* Translate net80211's chosen txrate into our table index. */
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Tell firmware to use the station's programmed rate table. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3325
/* Size of the first transfer buffer: the leading bytes of the TX command. */
#define TB0_SIZE 16
/*
 * Transmit one frame on hardware queue 'ac': build the TX command
 * (rate, flags, station id, power-save timeout), run crypto encap if
 * required, DMA-map the payload, fill the TFD descriptor with the
 * command and payload segments, and kick the ring's write pointer.
 *
 * On success the mbuf is owned by the ring (freed on completion); on
 * error the mbuf is freed here and an errno is returned.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Pick the power-save frame timeout class by frame subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	/*
	 * Map the payload for DMA.  Two TFD slots are reserved for the
	 * command, hence IWM_MAX_SCATTER - 2 payload segments.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;

	/* TB0/TB1 cover the TX command (header + tx cmd + 802.11 header). */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell write. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3558
/*
 * net80211 raw-transmit entry point.  Frames are sent through the normal
 * iwm_tx() path; the bpf params are currently ignored (both branches are
 * identical — see the XXX below).  On any return, the mbuf has been
 * consumed (either queued or freed).
 */
static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	/* Drop the frame if the hardware has not been brought up. */
	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this */
	/* NOTE(review): params is ignored; both branches call iwm_tx()
	 * with AC 0 — placeholder until raw params are honoured. */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	/* Arm the TX watchdog; cleared again on TX completion. */
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}
3589
3590 /*
3591  * mvm/tx.c
3592  */
3593
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
/*
 * (Disabled.)  Ask the firmware to flush the TX path for the queues in
 * tfd_msk, synchronously or asynchronously depending on 'sync'.
 * Returns 0 on success or the command-submission error.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3621
3622 static int
3623 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3624         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3625 {
3626         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3627             cmd, status);
3628 }
3629
3630 /* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
	int ret;
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
		IWM_DEFAULT_COLOR));
	if (!update) {
		int ac;
		/* New station: claim one TX FIFO queue per WME AC. */
		for (ac = 0; ac < WME_NUM_AC; ac++) {
			add_sta_cmd.tfd_queue_msk |=
			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	/* 0 = add a new station, 1 = modify an existing one. */
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	/* Disable TX on all TIDs (no aggregation set up here). */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	/* Command went out; now check the firmware's verdict. */
	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
		break;
	}

	return ret;
}
3675
/* Add the BSS station to the firmware (update == 0). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3681
/* Update the existing BSS station in the firmware (update == 1). */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3687
/*
 * Add an "internal" (driver-managed, not BSS peer) station to the
 * firmware, with an optional MAC address.  Returns 0 on success, EIO if
 * the firmware rejects the station, or the command-submission error.
 */
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
	const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
	struct iwm_mvm_add_sta_cmd_v7 cmd;
	int ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));

	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
	/* TX disabled on all TIDs for internal stations. */
	cmd.tid_disable_tx = htole16(0xffff);

	/* addr may be NULL (e.g. the aux station has no peer address). */
	if (addr)
		IEEE80211_ADDR_COPY(cmd.addr, addr);

	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: Internal station added.\n", __func__);
		return 0;
	default:
		device_printf(sc->sc_dev,
		    "%s: Add internal station failed, status=0x%x\n",
		    __func__, status);
		ret = EIO;
		break;
	}
	return ret;
}
3724
3725 static int
3726 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3727 {
3728         int ret;
3729
3730         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3731         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3732
3733         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3734         if (ret)
3735                 return ret;
3736
3737         ret = iwm_mvm_add_int_sta_common(sc,
3738             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3739
3740         if (ret)
3741                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3742         return ret;
3743 }
3744
/*
 * Recompute and push the per-binding airtime quotas to the firmware.
 * With 'in' non-NULL, its PHY context's binding gets an active interface;
 * with NULL, all bindings are idle.  Quota fragments are split evenly
 * across active bindings, remainder to the first one.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	/* colors[i] < 0 means binding i is unused. */
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill command slots (idx) only for bindings that are in use. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}
3811
3812 /*
3813  * ieee80211 routines
3814  */
3815
3816 /*
3817  * Change to AUTH state in 80211 state machine.  Roughly matches what
3818  * Linux does in bss_info_changed().
3819  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	/* Hold a reference on the BSS node for the duration of this call. */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	in->in_assoc = 0;

	/* Smart-FIFO full-on mode; required before association traffic. */
	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
	if (error != 0)
		return error;

	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		/* Contexts already exist in firmware: update them in place. */
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed update phy ctxt\n", __func__);
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding update cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update sta\n", __func__);
			goto out;
		}
	} else {
		/* First time through: create mac/phy contexts and station. */
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed add phy ctxt!\n", __func__);
			error = ETIMEDOUT;
			goto out;
		}
		in->in_phyctxt = &sc->sc_phyctxt[0];

		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: binding add cmd\n", __func__);
			goto out;
		}
		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add sta\n", __func__);
			goto out;
		}
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* Drop the node reference taken at the top. */
	ieee80211_free_node(ni);
	return (error);
}
3934
3935 static int
3936 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3937 {
3938         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3939         int error;
3940
3941         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3942                 device_printf(sc->sc_dev,
3943                     "%s: failed to update STA\n", __func__);
3944                 return error;
3945         }
3946
3947         in->in_assoc = 1;
3948         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3949                 device_printf(sc->sc_dev,
3950                     "%s: failed to update MAC\n", __func__);
3951                 return error;
3952         }
3953
3954         return 0;
3955 }
3956
/*
 * Tear down the connection to the current BSS by resetting the whole
 * device and re-running the init flow, rather than issuing the proper
 * per-context teardown commands (see comment below).  'in' may be NULL;
 * when non-NULL its association flag is cleared.  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	/* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/*
	 * Dead code below: the "proper" teardown sequence kept for
	 * reference; see the comment above for why it is not used.
	 */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4014
4015 static struct ieee80211_node *
4016 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4017 {
4018         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4019             M_INTWAIT | M_ZERO);
4020 }
4021
/*
 * Build the firmware link-quality (rate selection) command for node
 * 'in' from its negotiated legacy rate set.  Fills in->in_ridx[] with
 * hardware rate indices (highest rate first) and populates in->in_lq,
 * which the caller later sends via IWM_LQ_CMD.  Bails out early if the
 * node's rate count is zero or exceeds the lq table size.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* The firmware table has a fixed size; refuse oversized rate sets. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* rs_rates is lowest-first; index from the end for highest-first. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Unknown rate: leave in_ridx[i] at -1 from the memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid tx antennas, one per entry. */
		if (txant == 0)
			txant = iwm_fw_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		/* 'tab' still holds the last (lowest) rate from the loop above. */
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4137
4138 static int
4139 iwm_media_change(struct ifnet *ifp)
4140 {
4141         struct ieee80211vap *vap = ifp->if_softc;
4142         struct ieee80211com *ic = vap->iv_ic;
4143         struct iwm_softc *sc = ic->ic_softc;
4144         int error;
4145
4146         error = ieee80211_media_change(ifp);
4147         if (error != ENETRESET)
4148                 return error;
4149
4150         IWM_LOCK(sc);
4151         if (ic->ic_nrunning > 0) {
4152                 iwm_stop(sc);
4153                 iwm_init(sc);
4154         }
4155         IWM_UNLOCK(sc);
4156         return error;
4157 }
4158
4159
/*
 * net80211 state-machine hook.  Performs the driver-side work for each
 * state transition (auth, assoc, run) and then chains to the saved
 * net80211 handler.  Leaving RUN resets the whole device (see
 * iwm_release()), so RUN -> SCAN/AUTH/ASSOC is forced through INIT
 * first.  Called with the 802.11 comlock held; the lock is dropped and
 * the driver lock taken for the duration of the firmware work.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for the rationale. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Swap locks back to call the net80211 handler. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		/* sizeof(in->in_lq) is compile-time; 'in' is set below. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (e.g. the associd comes in at this point) */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Enable power save, beacon filtering, quotas and rates. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the link-quality table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the saved net80211 state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4287
4288 void
4289 iwm_endscan_cb(void *arg, int pending)
4290 {
4291         struct iwm_softc *sc = arg;
4292         struct ieee80211com *ic = &sc->sc_ic;
4293
4294         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4295             "%s: scan ended\n",
4296             __func__);
4297
4298         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4299 }
4300
4301 /*
4302  * Aging and idle timeouts for the different possible scenarios
4303  * in default configuration
4304  */
/* Each row is one scenario: { aging timeout, idle timeout } (little-endian). */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4328
4329 /*
4330  * Aging and idle timeouts for the different possible scenarios
4331  * in single BSS MAC configuration.
4332  */
/* Each row is one scenario: { aging timeout, idle timeout } (little-endian). */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4356
4357 static void
4358 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4359     struct ieee80211_node *ni)
4360 {
4361         int i, j, watermark;
4362
4363         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4364
4365         /*
4366          * If we are in association flow - check antenna configuration
4367          * capabilities of the AP station, and choose the watermark accordingly.
4368          */
4369         if (ni) {
4370                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4371 #ifdef notyet
4372                         if (ni->ni_rxmcs[2] != 0)
4373                                 watermark = IWM_SF_W_MARK_MIMO3;
4374                         else if (ni->ni_rxmcs[1] != 0)
4375                                 watermark = IWM_SF_W_MARK_MIMO2;
4376                         else
4377 #endif
4378                                 watermark = IWM_SF_W_MARK_SISO;
4379                 } else {
4380                         watermark = IWM_SF_W_MARK_LEGACY;
4381                 }
4382         /* default watermark value for unassociated mode. */
4383         } else {
4384                 watermark = IWM_SF_W_MARK_MIMO2;
4385         }
4386         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4387
4388         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4389                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4390                         sf_cmd->long_delay_timeouts[i][j] =
4391                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4392                 }
4393         }
4394
4395         if (ni) {
4396                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4397                        sizeof(iwm_sf_full_timeout));
4398         } else {
4399                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4400                        sizeof(iwm_sf_full_timeout_def));
4401         }
4402 }
4403
4404 static int
4405 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4406 {
4407         struct ieee80211com *ic = &sc->sc_ic;
4408         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4409         struct iwm_sf_cfg_cmd sf_cmd = {
4410                 .state = htole32(IWM_SF_FULL_ON),
4411         };
4412         int ret = 0;
4413
4414         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4415                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4416
4417         switch (new_state) {
4418         case IWM_SF_UNINIT:
4419         case IWM_SF_INIT_OFF:
4420                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4421                 break;
4422         case IWM_SF_FULL_ON:
4423                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4424                 break;
4425         default:
4426                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4427                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4428                           new_state);
4429                 return EINVAL;
4430         }
4431
4432         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4433                                    sizeof(sf_cmd), &sf_cmd);
4434         return ret;
4435 }
4436
4437 static int
4438 iwm_send_bt_init_conf(struct iwm_softc *sc)
4439 {
4440         struct iwm_bt_coex_cmd bt_cmd;
4441
4442         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4443         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4444
4445         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4446             &bt_cmd);
4447 }
4448
/*
 * Send an MCC (mobile country code) update for regulatory domain
 * 'alpha2' (two-character country code) to the firmware.  The command
 * size and response layout depend on whether the firmware advertises
 * LAR v2 support.  The response is only parsed under IWM_DEBUG; it is
 * always freed.  Returns 0 on success or the command error.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* v2 response format when the LAR v2 capability bit is set. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII country characters packed big-endian. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Old firmware takes the shorter v1 command. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4516
4517 static void
4518 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4519 {
4520         struct iwm_host_cmd cmd = {
4521                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4522                 .len = { sizeof(uint32_t), },
4523                 .data = { &backoff, },
4524         };
4525
4526         if (iwm_send_cmd(sc, &cmd) != 0) {
4527                 device_printf(sc->sc_dev,
4528                     "failed to change thermal tx backoff\n");
4529         }
4530 }
4531
/*
 * Bring the hardware from cold to fully operational: run the INIT
 * firmware for calibration, restart with the regular firmware, then
 * configure BT coex, antennas, PHY, stations, PHY contexts, power,
 * regulatory, scan and tx queues.  The order of these steps follows
 * the firmware's expected bring-up sequence.  On any failure after
 * firmware load the device is stopped again.  Returns 0 or an errno.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image first (calibration etc.). */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" requests the current/default regulatory domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	/* Newer firmware scans via the UMAC; configure it if present. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4640
4641 /* Allow multicast from our BSSID. */
4642 static int
4643 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4644 {
4645         struct ieee80211_node *ni = vap->iv_bss;
4646         struct iwm_mcast_filter_cmd *cmd;
4647         size_t size;
4648         int error;
4649
4650         size = roundup(sizeof(*cmd), 4);
4651         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4652         if (cmd == NULL)
4653                 return ENOMEM;
4654         cmd->filter_own = 1;
4655         cmd->port_id = 0;
4656         cmd->count = 0;
4657         cmd->pass_all = 1;
4658         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4659
4660         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4661             IWM_CMD_SYNC, size, cmd);
4662         kfree(cmd, M_DEVBUF);
4663
4664         return (error);
4665 }
4666
4667 /*
4668  * ifnet interfaces
4669  */
4670
4671 static void
4672 iwm_init(struct iwm_softc *sc)
4673 {
4674         int error;
4675
4676         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4677                 return;
4678         }
4679         sc->sc_generation++;
4680         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4681
4682         if ((error = iwm_init_hw(sc)) != 0) {
4683                 kprintf("iwm_init_hw failed %d\n", error);
4684                 iwm_stop(sc);
4685                 return;
4686         }
4687
4688         /*
4689          * Ok, firmware loaded and we are jogging
4690          */
4691         sc->sc_flags |= IWM_FLAG_HW_INITED;
4692         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4693 }
4694
4695 static int
4696 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4697 {
4698         struct iwm_softc *sc;
4699         int error;
4700
4701         sc = ic->ic_softc;
4702
4703         IWM_LOCK(sc);
4704         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4705                 IWM_UNLOCK(sc);
4706                 return (ENXIO);
4707         }
4708         error = mbufq_enqueue(&sc->sc_snd, m);
4709         if (error) {
4710                 IWM_UNLOCK(sc);
4711                 return (error);
4712         }
4713         iwm_start(sc);
4714         IWM_UNLOCK(sc);
4715         return (0);
4716 }
4717
4718 /*
4719  * Dequeue packets from sendq and call send.
4720  */
4721 static void
4722 iwm_start(struct iwm_softc *sc)
4723 {
4724         struct ieee80211_node *ni;
4725         struct mbuf *m;
4726         int ac = 0;
4727
4728         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4729         while (sc->qfullmsk == 0 &&
4730                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4731                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4732                 if (iwm_tx(sc, m, ni, ac) != 0) {
4733                         if_inc_counter(ni->ni_vap->iv_ifp,
4734                             IFCOUNTER_OERRORS, 1);
4735                         ieee80211_free_node(ni);
4736                         continue;
4737                 }
4738                 sc->sc_tx_timer = 15;
4739         }
4740         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4741 }
4742
4743 static void
4744 iwm_stop(struct iwm_softc *sc)
4745 {
4746
4747         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4748         sc->sc_flags |= IWM_FLAG_STOPPED;
4749         sc->sc_generation++;
4750         iwm_led_blink_stop(sc);
4751         sc->sc_tx_timer = 0;
4752         iwm_stop_device(sc);
4753 }
4754
4755 static void
4756 iwm_watchdog(void *arg)
4757 {
4758         struct iwm_softc *sc = arg;
4759
4760         if (sc->sc_tx_timer > 0) {
4761                 if (--sc->sc_tx_timer == 0) {
4762                         device_printf(sc->sc_dev, "device timeout\n");
4763 #ifdef IWM_DEBUG
4764                         iwm_nic_error(sc);
4765 #endif
4766                         iwm_stop(sc);
4767 #if defined(__DragonFly__)
4768                         ++sc->sc_ic.ic_oerrors;
4769 #else
4770                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4771 #endif
4772                         return;
4773                 }
4774         }
4775         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4776 }
4777
4778 static void
4779 iwm_parent(struct ieee80211com *ic)
4780 {
4781         struct iwm_softc *sc = ic->ic_softc;
4782         int startall = 0;
4783
4784         IWM_LOCK(sc);
4785         if (ic->ic_nrunning > 0) {
4786                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4787                         iwm_init(sc);
4788                         startall = 1;
4789                 }
4790         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4791                 iwm_stop(sc);
4792         IWM_UNLOCK(sc);
4793         if (startall)
4794                 ieee80211_start_all(ic);
4795 }
4796
4797 /*
4798  * The interrupt side of things
4799  */
4800
4801 /*
4802  * error dumping routines are from iwlwifi/mvm/utils.c
4803  */
4804
4805 /*
4806  * Note: This structure is read from the device with IO accesses,
4807  * and the reading already does the endian conversion. As it is
4808  * read with uint32_t-sized accesses, any members with a different size
4809  * need to be ordered correctly though!
4810  */
4811 struct iwm_error_event_table {
4812         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4813         uint32_t error_id;              /* type of error */
4814         uint32_t trm_hw_status0;        /* TRM HW status */
4815         uint32_t trm_hw_status1;        /* TRM HW status */
4816         uint32_t blink2;                /* branch link */
4817         uint32_t ilink1;                /* interrupt link */
4818         uint32_t ilink2;                /* interrupt link */
4819         uint32_t data1;         /* error-specific data */
4820         uint32_t data2;         /* error-specific data */
4821         uint32_t data3;         /* error-specific data */
4822         uint32_t bcon_time;             /* beacon timer */
4823         uint32_t tsf_low;               /* network timestamp function timer */
4824         uint32_t tsf_hi;                /* network timestamp function timer */
4825         uint32_t gp1;           /* GP1 timer register */
4826         uint32_t gp2;           /* GP2 timer register */
4827         uint32_t fw_rev_type;   /* firmware revision type */
4828         uint32_t major;         /* uCode version major */
4829         uint32_t minor;         /* uCode version minor */
4830         uint32_t hw_ver;                /* HW Silicon version */
4831         uint32_t brd_ver;               /* HW board version */
4832         uint32_t log_pc;                /* log program counter */
4833         uint32_t frame_ptr;             /* frame pointer */
4834         uint32_t stack_ptr;             /* stack pointer */
4835         uint32_t hcmd;          /* last host command header */
4836         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4837                                  * rxtx_flag */
4838         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4839                                  * host_flag */
4840         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4841                                  * enc_flag */
4842         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4843                                  * time_flag */
4844         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4845                                  * wico interrupt */
4846         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4847         uint32_t wait_event;            /* wait event() caller address */
4848         uint32_t l2p_control;   /* L2pControlField */
4849         uint32_t l2p_duration;  /* L2pDurationField */
4850         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4851         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4852         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4853                                  * (LMPM_PMG_SEL) */
4854         uint32_t u_timestamp;   /* indicate when the date and time of the
4855                                  * compilation */
4856         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4857 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4858
4859 /*
4860  * UMAC error struct - relevant starting from family 8000 chip.
4861  * Note: This structure is read from the device with IO accesses,
4862  * and the reading already does the endian conversion. As it is
4863  * read with u32-sized accesses, any members with a different size
4864  * need to be ordered correctly though!
4865  */
4866 struct iwm_umac_error_event_table {
4867         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4868         uint32_t error_id;      /* type of error */
4869         uint32_t blink1;        /* branch link */
4870         uint32_t blink2;        /* branch link */
4871         uint32_t ilink1;        /* interrupt link */
4872         uint32_t ilink2;        /* interrupt link */
4873         uint32_t data1;         /* error-specific data */
4874         uint32_t data2;         /* error-specific data */
4875         uint32_t data3;         /* error-specific data */
4876         uint32_t umac_major;
4877         uint32_t umac_minor;
4878         uint32_t frame_pointer; /* core register 27*/
4879         uint32_t stack_pointer; /* core register 28 */
4880         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4881         uint32_t nic_isr_pref;  /* ISR status register */
4882 } __packed;
4883
/*
 * Byte offsets used when interpreting the firmware error log: one
 * leading status word, then 7-word records.
 * NOTE(review): the "valid * ERROR_ELEM_SIZE" comparisons below reduce
 * to "valid != 0" — presumably inherited from iwlwifi; confirm there.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4886
4887 #ifdef IWM_DEBUG
/*
 * Map firmware error IDs to human-readable names for the error-log
 * dump (see iwm_desc_lookup()).  The terminating ADVANCED_SYSASSERT
 * entry is the catch-all returned when no ID matches.
 *
 * static const: the table is private to this file and never written,
 * so keep it out of the global namespace and in read-only storage.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
4909
4910 static const char *
4911 iwm_desc_lookup(uint32_t num)
4912 {
4913         int i;
4914
4915         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4916                 if (advanced_lookup[i].num == num)
4917                         return advanced_lookup[i].name;
4918
4919         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4920         return advanced_lookup[i].name;
4921 }
4922
/*
 * Dump the UMAC (family 8000+) firmware error log to the console.
 * The table address was supplied by the firmware in its ALIVE
 * response (sc_uc.uc_umac_error_event_table).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Addresses below 0x800000 are treated as bogus pointers. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a dword count, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* NOTE(review): reduces to "if (table.valid)"; mirrors upstream. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
4969
4970 /*
4971  * Support for dumping the error log seemed like a good idea ...
4972  * but it's mostly hex junk and the only sensible thing is the
4973  * hw/ucode revision (which we know anyway).  Since it's here,
4974  * I'll just leave it in, just in case e.g. the Intel guys want to
4975  * help us decipher some "ADVANCED_SYSASSERT" later.
4976  */
4977 static void
4978 iwm_nic_error(struct iwm_softc *sc)
4979 {
4980         struct iwm_error_event_table table;
4981         uint32_t base;
4982
4983         device_printf(sc->sc_dev, "dumping device error log\n");
4984         base = sc->sc_uc.uc_error_event_table;
4985         if (base < 0x800000) {
4986                 device_printf(sc->sc_dev,
4987                     "Invalid error log pointer 0x%08x\n", base);
4988                 return;
4989         }
4990
4991         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4992                 device_printf(sc->sc_dev, "reading errlog failed\n");
4993                 return;
4994         }
4995
4996         if (!table.valid) {
4997                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4998                 return;
4999         }
5000
5001         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5002                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5003                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5004                     sc->sc_flags, table.valid);
5005         }
5006
5007         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5008             iwm_desc_lookup(table.error_id));
5009         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5010             table.trm_hw_status0);
5011         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5012             table.trm_hw_status1);
5013         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5014         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5015         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5016         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5017         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5018         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5019         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5020         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5021         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5022         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5023         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5024         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5025             table.fw_rev_type);
5026         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5027         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5028         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5029         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5030         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5031         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5032         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5033         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5034         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5035         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5036         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5037         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5038         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5039         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5040         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5041         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5042         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5043         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5044         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5045
5046         if (sc->sc_uc.uc_umac_error_event_table)
5047                 iwm_nic_umac_error(sc);
5048 }
5049 #endif
5050
/*
 * Sync the RX buffer for CPU reads and point the destination at the
 * payload immediately following the packet header.
 *
 * NB: both macros silently rely on local variables named "ring" and
 * "data" being in scope at the call site (see iwm_notif_intr()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
do {                                                                    \
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
do {                                                                    \
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)
5062
5063 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5064
5065 /*
5066  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5067  * Basic structure from if_iwn
5068  */
5069 static void
5070 iwm_notif_intr(struct iwm_softc *sc)
5071 {
5072         uint16_t hw;
5073
5074         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5075             BUS_DMASYNC_POSTREAD);
5076
5077         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5078
5079         /*
5080          * Process responses
5081          */
5082         while (sc->rxq.cur != hw) {
5083                 struct iwm_rx_ring *ring = &sc->rxq;
5084                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5085                 struct iwm_rx_packet *pkt;
5086                 struct iwm_cmd_response *cresp;
5087                 int qid, idx, code;
5088
5089                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5090                     BUS_DMASYNC_POSTREAD);
5091                 pkt = mtod(data->m, struct iwm_rx_packet *);
5092
5093                 qid = pkt->hdr.qid & ~0x80;
5094                 idx = pkt->hdr.idx;
5095
5096                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5097                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5098                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5099                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5100
5101                 /*
5102                  * randomly get these from the firmware, no idea why.
5103                  * they at least seem harmless, so just ignore them for now
5104                  */
5105                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5106                     || pkt->len_n_flags == htole32(0x55550000))) {
5107                         ADVANCE_RXQ(sc);
5108                         continue;
5109                 }
5110
5111                 switch (code) {
5112                 case IWM_REPLY_RX_PHY_CMD:
5113                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5114                         break;
5115
5116                 case IWM_REPLY_RX_MPDU_CMD:
5117                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5118                         break;
5119
5120                 case IWM_TX_CMD:
5121                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5122                         break;
5123
5124                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5125                         struct iwm_missed_beacons_notif *resp;
5126                         int missed;
5127
5128                         /* XXX look at mac_id to determine interface ID */
5129                         struct ieee80211com *ic = &sc->sc_ic;
5130                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5131
5132                         SYNC_RESP_STRUCT(resp, pkt);
5133                         missed = le32toh(resp->consec_missed_beacons);
5134
5135                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5136                             "%s: MISSED_BEACON: mac_id=%d, "
5137                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5138                             "num_rx=%d\n",
5139                             __func__,
5140                             le32toh(resp->mac_id),
5141                             le32toh(resp->consec_missed_beacons_since_last_rx),
5142                             le32toh(resp->consec_missed_beacons),
5143                             le32toh(resp->num_expected_beacons),
5144                             le32toh(resp->num_recvd_beacons));
5145
5146                         /* Be paranoid */
5147                         if (vap == NULL)
5148                                 break;
5149
5150                         /* XXX no net80211 locking? */
5151                         if (vap->iv_state == IEEE80211_S_RUN &&
5152                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5153                                 if (missed > vap->iv_bmissthreshold) {
5154                                         /* XXX bad locking; turn into task */
5155                                         IWM_UNLOCK(sc);
5156                                         ieee80211_beacon_miss(ic);
5157                                         IWM_LOCK(sc);
5158                                 }
5159                         }
5160
5161                         break; }
5162
5163                 case IWM_MFUART_LOAD_NOTIFICATION:
5164                         break;
5165
5166                 case IWM_MVM_ALIVE: {
5167                         struct iwm_mvm_alive_resp_v1 *resp1;
5168                         struct iwm_mvm_alive_resp_v2 *resp2;
5169                         struct iwm_mvm_alive_resp_v3 *resp3;
5170
5171                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5172                                 SYNC_RESP_STRUCT(resp1, pkt);
5173                                 sc->sc_uc.uc_error_event_table
5174                                     = le32toh(resp1->error_event_table_ptr);
5175                                 sc->sc_uc.uc_log_event_table
5176                                     = le32toh(resp1->log_event_table_ptr);
5177                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5178                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5179                                         sc->sc_uc.uc_ok = 1;
5180                                 else
5181                                         sc->sc_uc.uc_ok = 0;
5182                         }
5183
5184                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5185                                 SYNC_RESP_STRUCT(resp2, pkt);
5186                                 sc->sc_uc.uc_error_event_table
5187                                     = le32toh(resp2->error_event_table_ptr);
5188                                 sc->sc_uc.uc_log_event_table
5189                                     = le32toh(resp2->log_event_table_ptr);
5190                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5191                                 sc->sc_uc.uc_umac_error_event_table
5192                                     = le32toh(resp2->error_info_addr);
5193                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5194                                         sc->sc_uc.uc_ok = 1;
5195                                 else
5196                                         sc->sc_uc.uc_ok = 0;
5197                         }
5198
5199                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5200                                 SYNC_RESP_STRUCT(resp3, pkt);
5201                                 sc->sc_uc.uc_error_event_table
5202                                     = le32toh(resp3->error_event_table_ptr);
5203                                 sc->sc_uc.uc_log_event_table
5204                                     = le32toh(resp3->log_event_table_ptr);
5205                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5206                                 sc->sc_uc.uc_umac_error_event_table
5207                                     = le32toh(resp3->error_info_addr);
5208                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5209                                         sc->sc_uc.uc_ok = 1;
5210                                 else
5211                                         sc->sc_uc.uc_ok = 0;
5212                         }
5213
5214                         sc->sc_uc.uc_intr = 1;
5215                         wakeup(&sc->sc_uc);
5216                         break; }
5217
5218                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5219                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5220                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5221
5222                         iwm_phy_db_set_section(sc, phy_db_notif);
5223
5224                         break; }
5225
5226                 case IWM_STATISTICS_NOTIFICATION: {
5227                         struct iwm_notif_statistics *stats;
5228                         SYNC_RESP_STRUCT(stats, pkt);
5229                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5230                         sc->sc_noise = iwm_get_noise(&stats->rx.general);
5231                         break; }
5232
5233                 case IWM_NVM_ACCESS_CMD:
5234                 case IWM_MCC_UPDATE_CMD:
5235                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5236                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5237                                     BUS_DMASYNC_POSTREAD);
5238                                 memcpy(sc->sc_cmd_resp,
5239                                     pkt, sizeof(sc->sc_cmd_resp));
5240                         }
5241                         break;
5242
5243                 case IWM_MCC_CHUB_UPDATE_CMD: {
5244                         struct iwm_mcc_chub_notif *notif;
5245                         SYNC_RESP_STRUCT(notif, pkt);
5246
5247                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5248                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5249                         sc->sc_fw_mcc[2] = '\0';
5250                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5251                             "fw source %d sent CC '%s'\n",
5252                             notif->source_id, sc->sc_fw_mcc);
5253                         break; }
5254
5255                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5256                         break;
5257
5258                 case IWM_PHY_CONFIGURATION_CMD:
5259                 case IWM_TX_ANT_CONFIGURATION_CMD:
5260                 case IWM_ADD_STA:
5261                 case IWM_MAC_CONTEXT_CMD:
5262                 case IWM_REPLY_SF_CFG_CMD:
5263                 case IWM_POWER_TABLE_CMD:
5264                 case IWM_PHY_CONTEXT_CMD:
5265                 case IWM_BINDING_CONTEXT_CMD:
5266                 case IWM_TIME_EVENT_CMD:
5267                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5268                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5269                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5270                 case IWM_REPLY_BEACON_FILTERING_CMD:
5271                 case IWM_MAC_PM_POWER_TABLE:
5272                 case IWM_TIME_QUOTA_CMD:
5273                 case IWM_REMOVE_STA:
5274                 case IWM_TXPATH_FLUSH:
5275                 case IWM_LQ_CMD:
5276                 case IWM_BT_CONFIG:
5277                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5278                         SYNC_RESP_STRUCT(cresp, pkt);
5279                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5280                                 memcpy(sc->sc_cmd_resp,
5281                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5282                         }
5283                         break;
5284
5285                 /* ignore */
5286                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5287                         break;
5288
5289                 case IWM_INIT_COMPLETE_NOTIF:
5290                         sc->sc_init_complete = 1;
5291                         wakeup(&sc->sc_init_complete);
5292                         break;
5293
5294                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5295                         struct iwm_periodic_scan_complete *notif;
5296                         SYNC_RESP_STRUCT(notif, pkt);
5297
5298                         break; }
5299
5300                 case IWM_SCAN_ITERATION_COMPLETE: {
5301                         struct iwm_lmac_scan_complete_notif *notif;
5302                         SYNC_RESP_STRUCT(notif, pkt);
5303                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5304                         break; }
5305
5306                 case IWM_SCAN_COMPLETE_UMAC: {
5307                         struct iwm_umac_scan_complete *notif;
5308                         SYNC_RESP_STRUCT(notif, pkt);
5309
5310                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5311                             "UMAC scan complete, status=0x%x\n",
5312                             notif->status);
5313 #if 0   /* XXX This would be a duplicate scan end call */
5314                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5315 #endif
5316                         break;
5317                 }
5318
5319                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5320                         struct iwm_umac_scan_iter_complete_notif *notif;
5321                         SYNC_RESP_STRUCT(notif, pkt);
5322
5323                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5324                             "complete, status=0x%x, %d channels scanned\n",
5325                             notif->status, notif->scanned_channels);
5326                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5327                         break;
5328                 }
5329
5330                 case IWM_REPLY_ERROR: {
5331                         struct iwm_error_resp *resp;
5332                         SYNC_RESP_STRUCT(resp, pkt);
5333
5334                         device_printf(sc->sc_dev,
5335                             "firmware error 0x%x, cmd 0x%x\n",
5336                             le32toh(resp->error_type),
5337                             resp->cmd_id);
5338                         break; }
5339
5340                 case IWM_TIME_EVENT_NOTIFICATION: {
5341                         struct iwm_time_event_notif *notif;
5342                         SYNC_RESP_STRUCT(notif, pkt);
5343
5344                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5345                             "TE notif status = 0x%x action = 0x%x\n",
5346                             notif->status, notif->action);
5347                         break; }
5348
5349                 case IWM_MCAST_FILTER_CMD:
5350                         break;
5351
5352                 case IWM_SCD_QUEUE_CFG: {
5353                         struct iwm_scd_txq_cfg_rsp *rsp;
5354                         SYNC_RESP_STRUCT(rsp, pkt);
5355
5356                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5357                             "queue cfg token=0x%x sta_id=%d "
5358                             "tid=%d scd_queue=%d\n",
5359                             rsp->token, rsp->sta_id, rsp->tid,
5360                             rsp->scd_queue);
5361                         break;
5362                 }
5363
5364                 default:
5365                         device_printf(sc->sc_dev,
5366                             "frame %d/%d %x UNHANDLED (this should "
5367                             "not happen)\n", qid, idx,
5368                             pkt->len_n_flags);
5369                         break;
5370                 }
5371
5372                 /*
5373                  * Why test bit 0x80?  The Linux driver:
5374                  *
5375                  * There is one exception:  uCode sets bit 15 when it
5376                  * originates the response/notification, i.e. when the
5377                  * response/notification is not a direct response to a
5378                  * command sent by the driver.  For example, uCode issues
5379                  * IWM_REPLY_RX when it sends a received frame to the driver;
5380                  * it is not a direct response to any driver command.
5381                  *
5382                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5383                  * uses a slightly different format for pkt->hdr, and "qid"
5384                  * is actually the upper byte of a two-byte field.
5385                  */
5386                 if (!(pkt->hdr.qid & (1 << 7))) {
5387                         iwm_cmd_done(sc, pkt);
5388                 }
5389
5390                 ADVANCE_RXQ(sc);
5391         }
5392
5393         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5394             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5395
5396         /*
5397          * Tell the firmware what we have processed.
5398          * Seems like the hardware gets upset unless we align
5399          * the write by 8??
5400          */
5401         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5402         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5403 }
5404
/*
 * Interrupt service routine.
 *
 * Reads the interrupt cause either from the in-memory ICT table (when
 * IWM_FLAG_USE_ICT is set) or directly from the IWM_CSR_INT register,
 * acknowledges it, and dispatches: firmware-chunk-loaded wakeups,
 * RF-kill handling, SW/HW error recovery, and RX notification
 * processing via iwm_notif_intr().  Interrupts are masked on entry and
 * restored via iwm_restore_interrupts() on the way out.
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        int handled = 0;
        int r1, r2, rv = 0;
        int isperiodic = 0;

#if defined(__DragonFly__)
        /* Guard against a stray interrupt arriving after detach. */
        if (sc->sc_mem == NULL) {
                kprintf("iwm_intr: detached\n");
                return;
        }
#endif
        IWM_LOCK(sc);
        /* Mask all interrupts while we service this one. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                /*
                 * NOTE(review): htole32() on a value *read* from the
                 * device-written ICT table looks like it should be
                 * le32toh(); the two are identical on little-endian
                 * hosts, so this has no effect there — confirm before
                 * changing.
                 */
                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        /* Consume the slot and advance the read index. */
                        ict[sc->ict_cur] = 0;
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                /* Expand the compressed ICT bits back into CSR_INT layout. */
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;
        }

        /* Acknowledge the causes we are about to handle. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* ignored */
        handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
                int i;
                struct ieee80211com *ic = &sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
                iwm_nic_error(sc);
#endif
                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

                /* Don't stop the device; just do a VAP restart */
                IWM_UNLOCK(sc);

                if (vap == NULL) {
                        kprintf("%s: null vap\n", __func__);
                        return;
                }

                device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
                    "restarting\n", __func__, vap->iv_state);

                /* XXX TODO: turn this into a callout/taskqueue */
                ieee80211_restart_all(ic);
                return;
        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                iwm_stop(sc);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Wake up the thread sleeping on sc_fw during fw upload. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        iwm_stop(sc);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                /* Drain the RX ring and process firmware notifications. */
                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        /* Re-enable the interrupts masked at entry. */
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
5565
/*
 * Autoconf glue-sniffing
 */
/*
 * PCI vendor/device IDs matched by iwm_probe() and used by
 * iwm_dev_check() to select firmware image and device family.
 */
#define PCI_VENDOR_INTEL                0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5580
/* Device-ID to marketing-name table consumed by iwm_probe(). */
static const struct iwm_devices {
        uint16_t        device;         /* PCI device ID */
        const char      *name;          /* human-readable adapter name */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
        { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
        { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
        { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
        { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
        { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5596
5597 static int
5598 iwm_probe(device_t dev)
5599 {
5600         int i;
5601
5602         for (i = 0; i < nitems(iwm_devices); i++) {
5603                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5604                     pci_get_device(dev) == iwm_devices[i].device) {
5605                         device_set_desc(dev, iwm_devices[i].name);
5606                         return (BUS_PROBE_DEFAULT);
5607                 }
5608         }
5609
5610         return (ENXIO);
5611 }
5612
5613 static int
5614 iwm_dev_check(device_t dev)
5615 {
5616         struct iwm_softc *sc;
5617
5618         sc = device_get_softc(dev);
5619
5620         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5621         switch (pci_get_device(dev)) {
5622         case PCI_PRODUCT_INTEL_WL_3160_1:
5623         case PCI_PRODUCT_INTEL_WL_3160_2:
5624                 sc->sc_fwname = "iwm3160fw";
5625                 sc->host_interrupt_operation_mode = 1;
5626                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5627                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5628                 return (0);
5629         case PCI_PRODUCT_INTEL_WL_3165_1:
5630         case PCI_PRODUCT_INTEL_WL_3165_2:
5631                 sc->sc_fwname = "iwm7265fw";
5632                 sc->host_interrupt_operation_mode = 0;
5633                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5634                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5635                 return (0);
5636         case PCI_PRODUCT_INTEL_WL_7260_1:
5637         case PCI_PRODUCT_INTEL_WL_7260_2:
5638                 sc->sc_fwname = "iwm7260fw";
5639                 sc->host_interrupt_operation_mode = 1;
5640                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5641                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5642                 return (0);
5643         case PCI_PRODUCT_INTEL_WL_7265_1:
5644         case PCI_PRODUCT_INTEL_WL_7265_2:
5645                 sc->sc_fwname = "iwm7265fw";
5646                 sc->host_interrupt_operation_mode = 0;
5647                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5648                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5649                 return (0);
5650         case PCI_PRODUCT_INTEL_WL_8260_1:
5651         case PCI_PRODUCT_INTEL_WL_8260_2:
5652                 sc->sc_fwname = "iwm8000Cfw";
5653                 sc->host_interrupt_operation_mode = 0;
5654                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5655                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5656                 return (0);
5657         default:
5658                 device_printf(dev, "unknown adapter type\n");
5659                 return ENXIO;
5660         }
5661 }
5662
5663 static int
5664 iwm_pci_attach(device_t dev)
5665 {
5666         struct iwm_softc *sc;
5667         int count, error, rid;
5668         uint16_t reg;
5669 #if defined(__DragonFly__)
5670         int irq_flags;
5671 #endif
5672
5673         sc = device_get_softc(dev);
5674
5675         /* Clear device-specific "PCI retry timeout" register (41h). */
5676         reg = pci_read_config(dev, 0x40, sizeof(reg));
5677         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5678
5679         /* Enable bus-mastering and hardware bug workaround. */
5680         pci_enable_busmaster(dev);
5681         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5682         /* if !MSI */
5683         if (reg & PCIM_STATUS_INTxSTATE) {
5684                 reg &= ~PCIM_STATUS_INTxSTATE;
5685         }
5686         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5687
5688         rid = PCIR_BAR(0);
5689         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5690             RF_ACTIVE);
5691         if (sc->sc_mem == NULL) {
5692                 device_printf(sc->sc_dev, "can't map mem space\n");
5693                 return (ENXIO);
5694         }
5695         sc->sc_st = rman_get_bustag(sc->sc_mem);
5696         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5697
5698         /* Install interrupt handler. */
5699         count = 1;
5700         rid = 0;
5701 #if defined(__DragonFly__)
5702         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5703         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5704 #else
5705         if (pci_alloc_msi(dev, &count) == 0)
5706                 rid = 1;
5707         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5708             (rid != 0 ? 0 : RF_SHAREABLE));
5709 #endif
5710         if (sc->sc_irq == NULL) {
5711                 device_printf(dev, "can't map interrupt\n");
5712                         return (ENXIO);
5713         }
5714 #if defined(__DragonFly__)
5715         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5716                                iwm_intr, sc, &sc->sc_ih,
5717                                &wlan_global_serializer);
5718 #else
5719         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5720             NULL, iwm_intr, sc, &sc->sc_ih);
5721 #endif
5722         if (sc->sc_ih == NULL) {
5723                 device_printf(dev, "can't establish interrupt");
5724 #if defined(__DragonFly__)
5725                 pci_release_msi(dev);
5726 #endif
5727                         return (ENXIO);
5728         }
5729         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5730
5731         return (0);
5732 }
5733
/*
 * PCI-level detach: tear down the interrupt handler and release the
 * IRQ and memory resources allocated by iwm_pci_attach().  Safe to
 * call with partially-initialized state (each resource is checked for
 * NULL), which is how the attach error path uses it.
 */
static void
iwm_pci_detach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);

        if (sc->sc_irq != NULL) {
                bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
                bus_release_resource(dev, SYS_RES_IRQ,
                    rman_get_rid(sc->sc_irq), sc->sc_irq);
                pci_release_msi(dev);
#if defined(__DragonFly__)
                /* NULL out so a late iwm_intr()/double detach is harmless. */
                sc->sc_irq = NULL;
#endif
        }
        if (sc->sc_mem != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
                /* iwm_intr() uses sc_mem == NULL as its "detached" check. */
                sc->sc_mem = NULL;
#endif
        }
}
5756
5757
5758
/*
 * Newbus attach: set up locks, taskqueue and callouts, attach at the
 * PCI level, identify the chip, allocate all DMA resources (firmware
 * buffer, keep-warm page, ICT table, scheduler, TX/RX rings), and
 * register an intrhook so the firmware-dependent net80211 setup runs
 * later in iwm_preinit() when interrupts are available.
 *
 * On any failure, iwm_detach_local(sc, 0) releases whatever was
 * allocated so far and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
        callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
        callout_init(&sc->sc_led_blink_to);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
        sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
        error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
                                        -1, "iwm_taskq");
#else
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
                    error);
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* -1 means "no synchronous command response expected". */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        /*
         * We now start fiddling with the hardware
         */
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                /* Wait (up to 25ms) for the MAC clock to come up. */
                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        /* 0x3 in the AUX MISC step field identifies C step. */
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Reset the PHY context table to "unused". */
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
        /* Defer fw-dependent setup to iwm_preinit() via intrhook. */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        sc->sc_preinit_hook.ich_desc = "iwm";
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
5951
5952 static int
5953 iwm_is_valid_ether_addr(uint8_t *addr)
5954 {
5955         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5956
5957         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5958                 return (FALSE);
5959
5960         return (TRUE);
5961 }
5962
/*
 * net80211 WME/EDCA parameter update callback.  Not implemented: it
 * only logs the call and reports success, so WME parameter changes are
 * not pushed to the firmware.
 */
static int
iwm_update_edca(struct ieee80211com *ic)
{
        struct iwm_softc *sc = ic->ic_softc;

        device_printf(sc->sc_dev, "%s: called\n", __func__);
        return (0);
}
5971
/*
 * Deferred (intrhook) part of attach: start the hardware, run the init
 * firmware once to read the NVM (then stop the device again), build the
 * channel map, and attach to net80211, installing all driver callbacks.
 * Runs when interrupts are available; on failure it tears everything
 * down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = &sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* Run the init ucode once (to read NVM etc.), then power down. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "hw rev 0x%x, fw ver %s, address %s\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

        /* not all hardware can do 5GHz band */
        if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
            ic->ic_channels);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        ieee80211_ifattach(ic);
        /* Install the driver's net80211 methods. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_getradiocaps = iwm_init_channel_map;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_update_edca;
        ic->ic_parent = iwm_parent;
        ic->ic_transmit = iwm_transmit;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
6042
/*
 * Attach the interface to 802.11 radiotap.
 *
 * Registers the driver's TX and RX radiotap headers with net80211 so
 * monitoring tools (e.g. tcpdump) can capture per-frame radio metadata.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s begin\n", __func__);
        ieee80211_radiotap_attach(ic,
            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
                IWM_TX_RADIOTAP_PRESENT,
            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
                IWM_RX_RADIOTAP_PRESENT);
        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s end\n", __func__);
}
6061
/*
 * net80211 VAP creation callback.  This driver supports exactly one
 * VAP at a time; a second request returns NULL.  Allocates the
 * driver-private iwm_vap wrapper, lets net80211 set up the generic
 * state, then interposes the driver's newstate handler (saving the
 * original so iwm_newstate can chain to it).
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
        struct iwm_vap *ivp;
        struct ieee80211vap *vap;

        if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
                return NULL;
        ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
        vap = &ivp->iv_vap;
        ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
        vap->iv_bmissthreshold = 10;            /* override default */
        /* Override with driver methods. */
        ivp->iv_newstate = vap->iv_newstate;
        vap->iv_newstate = iwm_newstate;

        ieee80211_ratectl_init(vap);
        /* Complete setup. */
        ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
            mac);
        ic->ic_opmode = opmode;

        return vap;
}
6089
6090 static void
6091 iwm_vap_delete(struct ieee80211vap *vap)
6092 {
6093         struct iwm_vap *ivp = IWM_VAP(vap);
6094
6095         ieee80211_ratectl_deinit(vap);
6096         ieee80211_vap_detach(vap);
6097         kfree(ivp, M_80211_VAP);
6098 }
6099
6100 static void
6101 iwm_scan_start(struct ieee80211com *ic)
6102 {
6103         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6104         struct iwm_softc *sc = ic->ic_softc;
6105         int error;
6106
6107         IWM_LOCK(sc);
6108         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6109                 error = iwm_mvm_umac_scan(sc);
6110         else
6111                 error = iwm_mvm_lmac_scan(sc);
6112         if (error != 0) {
6113                 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
6114                 IWM_UNLOCK(sc);
6115                 ieee80211_cancel_scan(vap);
6116         } else {
6117                 iwm_led_blink_start(sc);
6118                 IWM_UNLOCK(sc);
6119         }
6120 }
6121
6122 static void
6123 iwm_scan_end(struct ieee80211com *ic)
6124 {
6125         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6126         struct iwm_softc *sc = ic->ic_softc;
6127
6128         IWM_LOCK(sc);
6129         iwm_led_blink_stop(sc);
6130         if (vap->iv_state == IEEE80211_S_RUN)
6131                 iwm_mvm_led_enable(sc);
6132         IWM_UNLOCK(sc);
6133 }
6134
/*
 * net80211 multicast filter update callback.  Intentionally empty:
 * no multicast filter programming is done here.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6139
/*
 * net80211 set_channel callback.  Intentionally empty: channel changes
 * are handled by the firmware-driven scan/association machinery, not
 * by direct channel programming.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6144
/*
 * net80211 per-channel scan callback.  Intentionally empty: the
 * firmware performs the whole scan itself (see iwm_scan_start()), so
 * there is no per-channel work for the driver.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6149
/*
 * net80211 minimum-dwell callback.  Nothing to do: the firmware
 * controls dwell times during its offloaded scan.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6155
6156 void
6157 iwm_init_task(void *arg1)
6158 {
6159         struct iwm_softc *sc = arg1;
6160
6161         IWM_LOCK(sc);
6162         while (sc->sc_flags & IWM_FLAG_BUSY) {
6163 #if defined(__DragonFly__)
6164                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6165 #else
6166                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6167 #endif
6168 }
6169         sc->sc_flags |= IWM_FLAG_BUSY;
6170         iwm_stop(sc);
6171         if (sc->sc_ic.ic_nrunning > 0)
6172                 iwm_init(sc);
6173         sc->sc_flags &= ~IWM_FLAG_BUSY;
6174         wakeup(&sc->sc_flags);
6175         IWM_UNLOCK(sc);
6176 }
6177
6178 static int
6179 iwm_resume(device_t dev)
6180 {
6181         struct iwm_softc *sc = device_get_softc(dev);
6182         int do_reinit = 0;
6183         uint16_t reg;
6184
6185         /* Clear device-specific "PCI retry timeout" register (41h). */
6186         reg = pci_read_config(dev, 0x40, sizeof(reg));
6187         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6188         iwm_init_task(device_get_softc(dev));
6189
6190         IWM_LOCK(sc);
6191         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6192                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6193                 do_reinit = 1;
6194         }
6195         IWM_UNLOCK(sc);
6196
6197         if (do_reinit)
6198                 ieee80211_resume_all(&sc->sc_ic);
6199
6200         return 0;
6201 }
6202
6203 static int
6204 iwm_suspend(device_t dev)
6205 {
6206         int do_stop = 0;
6207         struct iwm_softc *sc = device_get_softc(dev);
6208
6209         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6210
6211         ieee80211_suspend_all(&sc->sc_ic);
6212
6213         if (do_stop) {
6214                 IWM_LOCK(sc);
6215                 iwm_stop(sc);
6216                 sc->sc_flags |= IWM_FLAG_SCANNING;
6217                 IWM_UNLOCK(sc);
6218         }
6219
6220         return (0);
6221 }
6222
/*
 * Common teardown for detach and attach-failure paths.
 *
 * @param do_net80211  non-zero to also detach the net80211 state;
 *                     the attach error paths pass 0 because
 *                     ieee80211_ifattach() may not have run yet.
 *
 * Order matters: taskqueue and callouts are quiesced before the
 * device is stopped, rings are freed before the DMA areas backing
 * them, and the PCI resources go last.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (sc->sc_tq) {
#if defined(__DragonFly__)
                /* doesn't exist for DFly, DFly drains tasks on free */
#else
                taskqueue_drain_all(sc->sc_tq);
#endif
                taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
                sc->sc_tq = NULL;
#endif
        }
        callout_drain(&sc->sc_led_blink_to);
        callout_drain(&sc->sc_watchdog_to);
        iwm_stop_device(sc);
        if (do_net80211) {
                ieee80211_ifdetach(&sc->sc_ic);
        }

        iwm_phy_db_free(sc);

        /* Free descriptor rings */
        iwm_free_rx_ring(sc, &sc->rxq);
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* Free scheduler */
        iwm_dma_contig_free(&sc->sched_dma);
        iwm_dma_contig_free(&sc->ict_dma);
        iwm_dma_contig_free(&sc->kw_dma);
        iwm_dma_contig_free(&sc->fw_dma);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        mbufq_drain(&sc->sc_snd);
        IWM_LOCK_DESTROY(sc);

        return (0);
}
6273
6274 static int
6275 iwm_detach(device_t dev)
6276 {
6277         struct iwm_softc *sc = device_get_softc(dev);
6278
6279         return (iwm_detach_local(sc, 1));
6280 }
6281
/* Newbus device interface glue. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
        "iwm",                          /* driver name */
        iwm_pci_methods,
        sizeof (struct iwm_softc)       /* per-instance softc size */
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);