if_iwm - Remove dead code from iwm_pcie_load_cpu_sections().
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150
151 #include <machine/endian.h>
152
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155
156 #include <net/bpf.h>
157
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
193
/* Channel numbers that may legitimately appear in the device NVM. */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* IWM_NUM_CHANNELS must be large enough to hold every entry above. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
204
/* As above, but the (larger) channel set used by the 8000 family. */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* IWM_NUM_CHANNELS_8000 must be large enough to hold every entry above. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
215
216 #define IWM_NUM_2GHZ_CHANNELS   14
217 #define IWM_N_HW_ADDR_MASK      0xF
218
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 Kbps (e.g. 2 == 1 Mbps) */
	uint8_t plcp;	/* hardware PLCP code for this rate */
} iwm_rates[] = {
	/* CCK rates (indices 0..3, see IWM_RIDX_OFDM below) */
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	/* OFDM rates (indices 4 and up) */
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* Entries below index IWM_RIDX_OFDM are CCK, the rest are OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
245
/* One NVM section as read from the device: raw bytes plus their length. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
250
/* Timeouts, in ticks, for the ucode "alive" and calibration waits. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State collected while waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;	/* scheduler base address reported by fw */
};
258
259 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int      iwm_firmware_store_section(struct iwm_softc *,
261                                            enum iwm_ucode_type,
262                                            const uint8_t *, size_t);
263 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void     iwm_fw_info_free(struct iwm_fw_info *);
265 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
266 #if !defined(__DragonFly__)
267 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int      iwm_alloc_fwmem(struct iwm_softc *);
270 static int      iwm_alloc_sched(struct iwm_softc *);
271 static int      iwm_alloc_kw(struct iwm_softc *);
272 static int      iwm_alloc_ict(struct iwm_softc *);
273 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277                                   int);
278 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void     iwm_enable_interrupts(struct iwm_softc *);
281 static void     iwm_restore_interrupts(struct iwm_softc *);
282 static void     iwm_disable_interrupts(struct iwm_softc *);
283 static void     iwm_ict_reset(struct iwm_softc *);
284 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void     iwm_stop_device(struct iwm_softc *);
286 static void     iwm_mvm_nic_config(struct iwm_softc *);
287 static int      iwm_nic_rx_init(struct iwm_softc *);
288 static int      iwm_nic_tx_init(struct iwm_softc *);
289 static int      iwm_nic_init(struct iwm_softc *);
290 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292                                    uint16_t, uint8_t *, uint16_t *);
293 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294                                      uint16_t *, uint32_t);
295 static uint32_t iwm_eeprom_channel_flags(uint16_t);
296 static void     iwm_add_channel_band(struct iwm_softc *,
297                     struct ieee80211_channel[], int, int *, int, size_t,
298                     const uint8_t[]);
299 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
300                     struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303                            const uint16_t *, const uint16_t *,
304                            const uint16_t *, const uint16_t *,
305                            const uint16_t *);
306 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
308                                                struct iwm_nvm_data *,
309                                                const uint16_t *,
310                                                const uint16_t *);
311 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312                             const uint16_t *);
313 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315                                   const uint16_t *);
316 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
317                                    const uint16_t *);
318 static void     iwm_set_radio_cfg(const struct iwm_softc *,
319                                   struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int      iwm_nvm_init(struct iwm_softc *);
323 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324                                       const struct iwm_fw_desc *);
325 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326                                              bus_addr_t, uint32_t);
327 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328                                                 const struct iwm_fw_sects *,
329                                                 int, int *);
330 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
331                                            const struct iwm_fw_sects *,
332                                            int, int *);
333 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334                                                const struct iwm_fw_sects *);
335 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
336                                           const struct iwm_fw_sects *);
337 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
338 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341                                               enum iwm_ucode_type);
342 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
344 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
345                                             struct iwm_rx_phy_info *);
346 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347                                       struct iwm_rx_packet *);
348 static int      iwm_get_noise(struct iwm_softc *sc,
349                     const struct iwm_mvm_statistics_rx_non_phy *);
350 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
351                                     uint32_t, boolean_t);
352 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
353                                          struct iwm_rx_packet *,
354                                          struct iwm_node *);
355 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
356 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
357 #if 0
358 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
359                                  uint16_t);
360 #endif
361 static const struct iwm_rate *
362         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
363                         struct ieee80211_frame *, struct iwm_tx_cmd *);
364 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
365                        struct ieee80211_node *, int);
366 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
367                              const struct ieee80211_bpf_params *);
368 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
369 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
370 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
371 static struct ieee80211_node *
372                 iwm_node_alloc(struct ieee80211vap *,
373                                const uint8_t[IEEE80211_ADDR_LEN]);
374 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
375 static int      iwm_media_change(struct ifnet *);
376 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
377 static void     iwm_endscan_cb(void *, int);
378 static int      iwm_send_bt_init_conf(struct iwm_softc *);
379 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
380 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
381 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
382 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
383 static int      iwm_init_hw(struct iwm_softc *);
384 static void     iwm_init(struct iwm_softc *);
385 static void     iwm_start(struct iwm_softc *);
386 static void     iwm_stop(struct iwm_softc *);
387 static void     iwm_watchdog(void *);
388 static void     iwm_parent(struct ieee80211com *);
389 #ifdef IWM_DEBUG
390 static const char *
391                 iwm_desc_lookup(uint32_t);
392 static void     iwm_nic_error(struct iwm_softc *);
393 static void     iwm_nic_umac_error(struct iwm_softc *);
394 #endif
395 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
396 static void     iwm_notif_intr(struct iwm_softc *);
397 static void     iwm_intr(void *);
398 static int      iwm_attach(device_t);
399 static int      iwm_is_valid_ether_addr(uint8_t *);
400 static void     iwm_preinit(void *);
401 static int      iwm_detach_local(struct iwm_softc *sc, int);
402 static void     iwm_init_task(void *);
403 static void     iwm_radiotap_attach(struct iwm_softc *);
404 static struct ieee80211vap *
405                 iwm_vap_create(struct ieee80211com *,
406                                const char [IFNAMSIZ], int,
407                                enum ieee80211_opmode, int,
408                                const uint8_t [IEEE80211_ADDR_LEN],
409                                const uint8_t [IEEE80211_ADDR_LEN]);
410 static void     iwm_vap_delete(struct ieee80211vap *);
411 static void     iwm_scan_start(struct ieee80211com *);
412 static void     iwm_scan_end(struct ieee80211com *);
413 static void     iwm_update_mcast(struct ieee80211com *);
414 static void     iwm_set_channel(struct ieee80211com *);
415 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
416 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
417 static int      iwm_detach(device_t);
418
#if defined(__DragonFly__)
/* Boot-time tunables (DragonFly only): hw.iwm.msi.enable toggles MSI
 * interrupt usage; hw.iwm.lar.disable turns off LAR support checks
 * (see iwm_mvm_is_lar_supported). */
static int	iwm_msi_enable = 1;
static int	iwm_lar_disable = 0;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

#endif
427
428 /*
429  * Firmware parser.
430  */
431
432 static int
433 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
434 {
435         const struct iwm_fw_cscheme_list *l = (const void *)data;
436
437         if (dlen < sizeof(*l) ||
438             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
439                 return EINVAL;
440
441         /* we don't actually store anything for now, always use s/w crypto */
442
443         return 0;
444 }
445
446 static int
447 iwm_firmware_store_section(struct iwm_softc *sc,
448     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
449 {
450         struct iwm_fw_sects *fws;
451         struct iwm_fw_desc *fwone;
452
453         if (type >= IWM_UCODE_TYPE_MAX)
454                 return EINVAL;
455         if (dlen < sizeof(uint32_t))
456                 return EINVAL;
457
458         fws = &sc->sc_fw.fw_sects[type];
459         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
460                 return EINVAL;
461
462         fwone = &fws->fw_sect[fws->fw_count];
463
464         /* first 32bit are device load offset */
465         memcpy(&fwone->offset, data, sizeof(uint32_t));
466
467         /* rest is data */
468         fwone->data = data + sizeof(uint32_t);
469         fwone->len = dlen - sizeof(uint32_t);
470
471         fws->fw_count++;
472
473         return 0;
474 }
475
/* Fallback scan-channel count when the firmware does not specify one. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/*
 * Wire layout of a default-calibration TLV payload: the ucode image
 * type it applies to, followed by the calibration trigger words.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
482
483 static int
484 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
485 {
486         const struct iwm_tlv_calib_data *def_calib = data;
487         uint32_t ucode_type = le32toh(def_calib->ucode_type);
488
489         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
490                 device_printf(sc->sc_dev,
491                     "Wrong ucode_type %u for default "
492                     "calibration.\n", ucode_type);
493                 return EINVAL;
494         }
495
496         sc->sc_default_calib[ucode_type].flow_trigger =
497             def_calib->calib.flow_trigger;
498         sc->sc_default_calib[ucode_type].event_trigger =
499             def_calib->calib.event_trigger;
500
501         return 0;
502 }
503
504 static int
505 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
506                         struct iwm_ucode_capabilities *capa)
507 {
508         const struct iwm_ucode_api *ucode_api = (const void *)data;
509         uint32_t api_index = le32toh(ucode_api->api_index);
510         uint32_t api_flags = le32toh(ucode_api->api_flags);
511         int i;
512
513         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
514                 device_printf(sc->sc_dev,
515                     "api flags index %d larger than supported by driver\n",
516                     api_index);
517                 /* don't return an error so we can load FW that has more bits */
518                 return 0;
519         }
520
521         for (i = 0; i < 32; i++) {
522                 if (api_flags & (1U << i))
523                         setbit(capa->enabled_api, i + 32 * api_index);
524         }
525
526         return 0;
527 }
528
529 static int
530 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
531                            struct iwm_ucode_capabilities *capa)
532 {
533         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
534         uint32_t api_index = le32toh(ucode_capa->api_index);
535         uint32_t api_flags = le32toh(ucode_capa->api_capa);
536         int i;
537
538         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
539                 device_printf(sc->sc_dev,
540                     "capa flags index %d larger than supported by driver\n",
541                     api_index);
542                 /* don't return an error so we can load FW that has more bits */
543                 return 0;
544         }
545
546         for (i = 0; i < 32; i++) {
547                 if (api_flags & (1U << i))
548                         setbit(capa->enabled_capa, i + 32 * api_index);
549         }
550
551         return 0;
552 }
553
554 static void
555 iwm_fw_info_free(struct iwm_fw_info *fw)
556 {
557         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
558         fw->fw_fp = NULL;
559         /* don't touch fw->fw_status */
560         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
561 }
562
563 static int
564 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
565 {
566         struct iwm_fw_info *fw = &sc->sc_fw;
567         const struct iwm_tlv_ucode_header *uhdr;
568         const struct iwm_ucode_tlv *tlv;
569         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
570         enum iwm_ucode_tlv_type tlv_type;
571         const struct firmware *fwp;
572         const uint8_t *data;
573         uint32_t tlv_len;
574         uint32_t usniffer_img;
575         const uint8_t *tlv_data;
576         uint32_t paging_mem_size;
577         int num_of_cpus;
578         int error = 0;
579         size_t len;
580
581         if (fw->fw_status == IWM_FW_STATUS_DONE &&
582             ucode_type != IWM_UCODE_INIT)
583                 return 0;
584
585         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
586 #if defined(__DragonFly__)
587                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
588 #else
589                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
590 #endif
591         }
592         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
593
594         if (fw->fw_fp != NULL)
595                 iwm_fw_info_free(fw);
596
597         /*
598          * Load firmware into driver memory.
599          * fw_fp will be set.
600          */
601         IWM_UNLOCK(sc);
602         fwp = firmware_get(sc->cfg->fw_name);
603         IWM_LOCK(sc);
604         if (fwp == NULL) {
605                 device_printf(sc->sc_dev,
606                     "could not read firmware %s (error %d)\n",
607                     sc->cfg->fw_name, error);
608                 goto out;
609         }
610         fw->fw_fp = fwp;
611
612         /* (Re-)Initialize default values. */
613         capa->flags = 0;
614         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
615         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
616         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
617         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
618         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
619
620         /*
621          * Parse firmware contents
622          */
623
624         uhdr = (const void *)fw->fw_fp->data;
625         if (*(const uint32_t *)fw->fw_fp->data != 0
626             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
627                 device_printf(sc->sc_dev, "invalid firmware %s\n",
628                     sc->cfg->fw_name);
629                 error = EINVAL;
630                 goto out;
631         }
632
633         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
634             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
635             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
636             IWM_UCODE_API(le32toh(uhdr->ver)));
637         data = uhdr->data;
638         len = fw->fw_fp->datasize - sizeof(*uhdr);
639
640         while (len >= sizeof(*tlv)) {
641                 len -= sizeof(*tlv);
642                 tlv = (const void *)data;
643
644                 tlv_len = le32toh(tlv->length);
645                 tlv_type = le32toh(tlv->type);
646                 tlv_data = tlv->data;
647
648                 if (len < tlv_len) {
649                         device_printf(sc->sc_dev,
650                             "firmware too short: %zu bytes\n",
651                             len);
652                         error = EINVAL;
653                         goto parse_out;
654                 }
655                 len -= roundup2(tlv_len, 4);
656                 data += sizeof(tlv) + roundup2(tlv_len, 4);
657
658                 switch ((int)tlv_type) {
659                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
660                         if (tlv_len != sizeof(uint32_t)) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
663                                     __func__,
664                                     (int) tlv_len);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         capa->max_probe_length =
669                             le32_to_cpup((const uint32_t *)tlv_data);
670                         /* limit it to something sensible */
671                         if (capa->max_probe_length >
672                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
673                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
674                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
675                                     "ridiculous\n", __func__);
676                                 error = EINVAL;
677                                 goto parse_out;
678                         }
679                         break;
680                 case IWM_UCODE_TLV_PAN:
681                         if (tlv_len) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
684                                     __func__,
685                                     (int) tlv_len);
686                                 error = EINVAL;
687                                 goto parse_out;
688                         }
689                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
690                         break;
691                 case IWM_UCODE_TLV_FLAGS:
692                         if (tlv_len < sizeof(uint32_t)) {
693                                 device_printf(sc->sc_dev,
694                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
695                                     __func__,
696                                     (int) tlv_len);
697                                 error = EINVAL;
698                                 goto parse_out;
699                         }
700                         if (tlv_len % sizeof(uint32_t)) {
701                                 device_printf(sc->sc_dev,
702                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
703                                     __func__,
704                                     (int) tlv_len);
705                                 error = EINVAL;
706                                 goto parse_out;
707                         }
708                         /*
709                          * Apparently there can be many flags, but Linux driver
710                          * parses only the first one, and so do we.
711                          *
712                          * XXX: why does this override IWM_UCODE_TLV_PAN?
713                          * Intentional or a bug?  Observations from
714                          * current firmware file:
715                          *  1) TLV_PAN is parsed first
716                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
717                          * ==> this resets TLV_PAN to itself... hnnnk
718                          */
719                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
720                         break;
721                 case IWM_UCODE_TLV_CSCHEME:
722                         if ((error = iwm_store_cscheme(sc,
723                             tlv_data, tlv_len)) != 0) {
724                                 device_printf(sc->sc_dev,
725                                     "%s: iwm_store_cscheme(): returned %d\n",
726                                     __func__,
727                                     error);
728                                 goto parse_out;
729                         }
730                         break;
731                 case IWM_UCODE_TLV_NUM_OF_CPU:
732                         if (tlv_len != sizeof(uint32_t)) {
733                                 device_printf(sc->sc_dev,
734                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
735                                     __func__,
736                                     (int) tlv_len);
737                                 error = EINVAL;
738                                 goto parse_out;
739                         }
740                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
741                         if (num_of_cpus == 2) {
742                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
743                                         TRUE;
744                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
745                                         TRUE;
746                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
747                                         TRUE;
748                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
749                                 device_printf(sc->sc_dev,
750                                     "%s: Driver supports only 1 or 2 CPUs\n",
751                                     __func__);
752                                 error = EINVAL;
753                                 goto parse_out;
754                         }
755                         break;
756                 case IWM_UCODE_TLV_SEC_RT:
757                         if ((error = iwm_firmware_store_section(sc,
758                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
759                                 device_printf(sc->sc_dev,
760                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
761                                     __func__,
762                                     error);
763                                 goto parse_out;
764                         }
765                         break;
766                 case IWM_UCODE_TLV_SEC_INIT:
767                         if ((error = iwm_firmware_store_section(sc,
768                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
769                                 device_printf(sc->sc_dev,
770                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
771                                     __func__,
772                                     error);
773                                 goto parse_out;
774                         }
775                         break;
776                 case IWM_UCODE_TLV_SEC_WOWLAN:
777                         if ((error = iwm_firmware_store_section(sc,
778                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
779                                 device_printf(sc->sc_dev,
780                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
781                                     __func__,
782                                     error);
783                                 goto parse_out;
784                         }
785                         break;
786                 case IWM_UCODE_TLV_DEF_CALIB:
787                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
788                                 device_printf(sc->sc_dev,
789                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
790                                     __func__,
791                                     (int) tlv_len,
792                                     (int) sizeof(struct iwm_tlv_calib_data));
793                                 error = EINVAL;
794                                 goto parse_out;
795                         }
796                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
797                                 device_printf(sc->sc_dev,
798                                     "%s: iwm_set_default_calib() failed: %d\n",
799                                     __func__,
800                                     error);
801                                 goto parse_out;
802                         }
803                         break;
804                 case IWM_UCODE_TLV_PHY_SKU:
805                         if (tlv_len != sizeof(uint32_t)) {
806                                 error = EINVAL;
807                                 device_printf(sc->sc_dev,
808                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
809                                     __func__,
810                                     (int) tlv_len);
811                                 goto parse_out;
812                         }
813                         sc->sc_fw.phy_config =
814                             le32_to_cpup((const uint32_t *)tlv_data);
815                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
816                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
817                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
818                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
819                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
820                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
821                         break;
822
823                 case IWM_UCODE_TLV_API_CHANGES_SET: {
824                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
825                                 error = EINVAL;
826                                 goto parse_out;
827                         }
828                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
829                                 error = EINVAL;
830                                 goto parse_out;
831                         }
832                         break;
833                 }
834
835                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
836                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
837                                 error = EINVAL;
838                                 goto parse_out;
839                         }
840                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
841                                 error = EINVAL;
842                                 goto parse_out;
843                         }
844                         break;
845                 }
846
847                 case 48: /* undocumented TLV */
848                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
849                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
850                         /* ignore, not used by current driver */
851                         break;
852
853                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
854                         if ((error = iwm_firmware_store_section(sc,
855                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
856                             tlv_len)) != 0)
857                                 goto parse_out;
858                         break;
859
860                 case IWM_UCODE_TLV_PAGING:
861                         if (tlv_len != sizeof(uint32_t)) {
862                                 error = EINVAL;
863                                 goto parse_out;
864                         }
865                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
866
867                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
868                             "%s: Paging: paging enabled (size = %u bytes)\n",
869                             __func__, paging_mem_size);
870                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
871                                 device_printf(sc->sc_dev,
872                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
873                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
874                                 error = EINVAL;
875                                 goto out;
876                         }
877                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
878                                 device_printf(sc->sc_dev,
879                                     "%s: Paging: image isn't multiple %u\n",
880                                     __func__, IWM_FW_PAGING_SIZE);
881                                 error = EINVAL;
882                                 goto out;
883                         }
884
885                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
886                             paging_mem_size;
887                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
888                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
889                             paging_mem_size;
890                         break;
891
892                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
893                         if (tlv_len != sizeof(uint32_t)) {
894                                 error = EINVAL;
895                                 goto parse_out;
896                         }
897                         capa->n_scan_channels =
898                             le32_to_cpup((const uint32_t *)tlv_data);
899                         break;
900
901                 case IWM_UCODE_TLV_FW_VERSION:
902                         if (tlv_len != sizeof(uint32_t) * 3) {
903                                 error = EINVAL;
904                                 goto parse_out;
905                         }
906                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
907                             "%d.%d.%d",
908                             le32toh(((const uint32_t *)tlv_data)[0]),
909                             le32toh(((const uint32_t *)tlv_data)[1]),
910                             le32toh(((const uint32_t *)tlv_data)[2]));
911                         break;
912
913                 case IWM_UCODE_TLV_FW_MEM_SEG:
914                         break;
915
916                 default:
917                         device_printf(sc->sc_dev,
918                             "%s: unknown firmware section %d, abort\n",
919                             __func__, tlv_type);
920                         error = EINVAL;
921                         goto parse_out;
922                 }
923         }
924
925         KASSERT(error == 0, ("unhandled error"));
926
927  parse_out:
928         if (error) {
929                 device_printf(sc->sc_dev, "firmware parse error %d, "
930                     "section type %d\n", error, tlv_type);
931         }
932
933  out:
934         if (error) {
935                 fw->fw_status = IWM_FW_STATUS_NONE;
936                 if (fw->fw_fp != NULL)
937                         iwm_fw_info_free(fw);
938         } else
939                 fw->fw_status = IWM_FW_STATUS_DONE;
940         wakeup(&sc->sc_fw);
941
942         return error;
943 }
944
945 /*
946  * DMA resource routines
947  */
948
949 /* fwmem is used to load firmware onto the card */
950 static int
951 iwm_alloc_fwmem(struct iwm_softc *sc)
952 {
953         /* Must be aligned on a 16-byte boundary. */
954         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
955             IWM_FH_MEM_TB_MAX_LENGTH, 16);
956 }
957
958 /* tx scheduler rings.  not used? */
959 static int
960 iwm_alloc_sched(struct iwm_softc *sc)
961 {
962         /* TX scheduler rings must be aligned on a 1KB boundary. */
963         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
964             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
965 }
966
967 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
968 static int
969 iwm_alloc_kw(struct iwm_softc *sc)
970 {
971         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
972 }
973
974 /* interrupt cause table */
975 static int
976 iwm_alloc_ict(struct iwm_softc *sc)
977 {
978         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
979             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
980 }
981
/*
 * Allocate and initialize an RX ring: the descriptor array, the RX
 * status area, the buffer DMA tag, a spare DMA map for iwm_rx_addbuf(),
 * and one mapped receive buffer per ring slot.
 *
 * Returns 0 on success or an errno; on failure everything allocated so
 * far is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned);
	 * one 32-bit entry per ring slot.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag (single segment of IWM_RBUF_SIZE). */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Have iwm_rx_addbuf() attach and map a buffer for slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1062
1063 static void
1064 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1065 {
1066         /* Reset the ring state */
1067         ring->cur = 0;
1068
1069         /*
1070          * The hw rx ring index in shared memory must also be cleared,
1071          * otherwise the discrepancy can cause reprocessing chaos.
1072          */
1073         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1074 }
1075
1076 static void
1077 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1078 {
1079         int i;
1080
1081         iwm_dma_contig_free(&ring->desc_dma);
1082         iwm_dma_contig_free(&ring->stat_dma);
1083
1084         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1085                 struct iwm_rx_data *data = &ring->data[i];
1086
1087                 if (data->m != NULL) {
1088                         bus_dmamap_sync(ring->data_dmat, data->map,
1089                             BUS_DMASYNC_POSTREAD);
1090                         bus_dmamap_unload(ring->data_dmat, data->map);
1091                         m_freem(data->m);
1092                         data->m = NULL;
1093                 }
1094                 if (data->map != NULL) {
1095                         bus_dmamap_destroy(ring->data_dmat, data->map);
1096                         data->map = NULL;
1097                 }
1098         }
1099         if (ring->spare_map != NULL) {
1100                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1101                 ring->spare_map = NULL;
1102         }
1103         if (ring->data_dmat != NULL) {
1104                 bus_dma_tag_destroy(ring->data_dmat);
1105                 ring->data_dmat = NULL;
1106         }
1107 }
1108
/*
 * Allocate a TX ring: the TFD descriptor array for every ring, plus —
 * for rings 0 through IWM_MVM_CMD_QUEUE only — the command buffer
 * area, a data DMA tag and one DMA map per slot.
 *
 * Returns 0 on success or an errno; on failure anything allocated so
 * far is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	/* Create the DMA tag used to map outgoing frames/commands. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute per-slot physical addresses into the command area:
	 * each slot owns one struct iwm_device_cmd, and scratch_paddr
	 * points at the scratch field inside that command's TX payload.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk above must cover the command area exactly. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1198
1199 static void
1200 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1201 {
1202         int i;
1203
1204         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1205                 struct iwm_tx_data *data = &ring->data[i];
1206
1207                 if (data->m != NULL) {
1208                         bus_dmamap_sync(ring->data_dmat, data->map,
1209                             BUS_DMASYNC_POSTWRITE);
1210                         bus_dmamap_unload(ring->data_dmat, data->map);
1211                         m_freem(data->m);
1212                         data->m = NULL;
1213                 }
1214         }
1215         /* Clear TX descriptors. */
1216         memset(ring->desc, 0, ring->desc_dma.size);
1217         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1218             BUS_DMASYNC_PREWRITE);
1219         sc->qfullmsk &= ~(1 << ring->qid);
1220         ring->queued = 0;
1221         ring->cur = 0;
1222
1223         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1224                 iwm_pcie_clear_cmd_in_flight(sc);
1225 }
1226
1227 static void
1228 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1229 {
1230         int i;
1231
1232         iwm_dma_contig_free(&ring->desc_dma);
1233         iwm_dma_contig_free(&ring->cmd_dma);
1234
1235         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1236                 struct iwm_tx_data *data = &ring->data[i];
1237
1238                 if (data->m != NULL) {
1239                         bus_dmamap_sync(ring->data_dmat, data->map,
1240                             BUS_DMASYNC_POSTWRITE);
1241                         bus_dmamap_unload(ring->data_dmat, data->map);
1242                         m_freem(data->m);
1243                         data->m = NULL;
1244                 }
1245                 if (data->map != NULL) {
1246                         bus_dmamap_destroy(ring->data_dmat, data->map);
1247                         data->map = NULL;
1248                 }
1249         }
1250         if (ring->data_dmat != NULL) {
1251                 bus_dma_tag_destroy(ring->data_dmat);
1252                 ring->data_dmat = NULL;
1253         }
1254 }
1255
1256 /*
1257  * High-level hardware frobbing routines
1258  */
1259
1260 static void
1261 iwm_enable_interrupts(struct iwm_softc *sc)
1262 {
1263         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1264         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1265 }
1266
/*
 * Re-apply the interrupt mask last recorded in sc->sc_intmask
 * (set by iwm_enable_interrupts()) without recomputing it.
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1272
/*
 * Mask all device interrupts, then acknowledge any pending causes in
 * both the main interrupt register and the FH interrupt status
 * register so nothing fires once interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1283
/*
 * Reset the interrupt cause table (ICT): clear the table, program its
 * physical address into the card, and re-enable interrupts with the
 * driver in ICT mode.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	/* Acknowledge any pending causes first, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1307
1308 /*
1309  * Since this .. hard-resets things, it's time to actually
1310  * mark the first vap (if any) as having no mac context.
1311  * It's annoying, but since the driver is potentially being
1312  * stop/start'ed whilst active (thanks openbsd port!) we
1313  * have to correctly track this.
1314  */
1315 static void
1316 iwm_stop_device(struct iwm_softc *sc)
1317 {
1318         struct ieee80211com *ic = &sc->sc_ic;
1319         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1320         int chnl, qid;
1321         uint32_t mask = 0;
1322
1323         /* tell the device to stop sending interrupts */
1324         iwm_disable_interrupts(sc);
1325
1326         /*
1327          * FreeBSD-local: mark the first vap as not-uploaded,
1328          * so the next transition through auth/assoc
1329          * will correctly populate the MAC context.
1330          */
1331         if (vap) {
1332                 struct iwm_vap *iv = IWM_VAP(vap);
1333                 iv->phy_ctxt = NULL;
1334                 iv->is_uploaded = 0;
1335         }
1336
1337         /* device going down, Stop using ICT table */
1338         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1339
1340         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1341
1342         if (iwm_nic_lock(sc)) {
1343                 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1344
1345                 /* Stop each Tx DMA channel */
1346                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1347                         IWM_WRITE(sc,
1348                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1349                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1350                 }
1351
1352                 /* Wait for DMA channels to be idle */
1353                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1354                     5000)) {
1355                         device_printf(sc->sc_dev,
1356                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1357                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1358                 }
1359                 iwm_nic_unlock(sc);
1360         }
1361         iwm_pcie_rx_stop(sc);
1362
1363         /* Stop RX ring. */
1364         iwm_reset_rx_ring(sc, &sc->rxq);
1365
1366         /* Reset all TX rings. */
1367         for (qid = 0; qid < nitems(sc->txq); qid++)
1368                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1369
1370         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1371                 /* Power-down device's busmaster DMA clocks */
1372                 if (iwm_nic_lock(sc)) {
1373                         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1374                             IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1375                         iwm_nic_unlock(sc);
1376                 }
1377                 DELAY(5);
1378         }
1379
1380         /* Make sure (redundant) we've released our request to stay awake */
1381         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1382             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1383
1384         /* Stop the device, and put it in low power state */
1385         iwm_apm_stop(sc);
1386
1387         /* stop and reset the on-board processor */
1388         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1389         DELAY(1000);
1390
1391         /*
1392          * Upon stop, the APM issues an interrupt if HW RF kill is set.
1393          * This is a bug in certain verions of the hardware.
1394          * Certain devices also keep sending HW RF kill interrupt all
1395          * the time, unless the interrupt is ACKed even if the interrupt
1396          * should be masked. Re-ACK all the interrupts here.
1397          */
1398         iwm_disable_interrupts(sc);
1399
1400         /*
1401          * Even if we stop the HW, we still want the RF kill
1402          * interrupt
1403          */
1404         iwm_enable_rfkill_int(sc);
1405         iwm_check_rfkill(sc);
1406 }
1407
1408 static void
1409 iwm_mvm_nic_config(struct iwm_softc *sc)
1410 {
1411         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1412         uint32_t reg_val = 0;
1413         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1414
1415         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1416             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1417         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1418             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1419         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1420             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1421
1422         /* SKU control */
1423         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1424             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1425         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1426             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1427
1428         /* radio configuration */
1429         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1430         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1431         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1432
1433         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1434
1435         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1436             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1437             radio_cfg_step, radio_cfg_dash);
1438
1439         /*
1440          * W/A : NIC is stuck in a reset state after Early PCIe power off
1441          * (PCIe power is lost before PERST# is asserted), causing ME FW
1442          * to lose ownership and not being able to obtain it back.
1443          */
1444         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1445                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1446                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1447                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1448         }
1449 }
1450
/*
 * Program the RX DMA engine: zero the status page, stop any DMA in
 * progress, point the channel at the RX descriptor ring and status
 * page, and re-enable it with interrupt coalescing configured.
 * Returns 0 on success or EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
        /* Force serialization (probably not needed but don't trust the HW) */
        IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        /* Default interrupt coalescing timer. */
        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1519
/*
 * Program the TX side: disable the scheduler, install the "keep warm"
 * page, and hand each TX ring's descriptor base address to the FH.
 * Returns 0 on success or EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        /* Let the scheduler auto-activate queues. */
        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1554
1555 static int
1556 iwm_nic_init(struct iwm_softc *sc)
1557 {
1558         int error;
1559
1560         iwm_apm_init(sc);
1561         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1562                 iwm_set_pwr(sc);
1563
1564         iwm_mvm_nic_config(sc);
1565
1566         if ((error = iwm_nic_rx_init(sc)) != 0)
1567                 return error;
1568
1569         /*
1570          * Ditto for TX, from iwn
1571          */
1572         if ((error = iwm_nic_tx_init(sc)) != 0)
1573                 return error;
1574
1575         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1576             "%s: shadow registers enabled\n", __func__);
1577         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1578
1579         return 0;
1580 }
1581
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue is configured directly through the scheduler PRPH
 * registers; every other queue is configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware.  Returns 0 on success,
 * EBUSY if the NIC could not be locked, or the errno from the firmware
 * command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Take the queue out of aggregation mode. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the first word of the queue's scheduler context. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Activate the queue and attach it to the requested FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                /* Firmware-configured queue: no NIC lock around the cmd. */
                iwm_nic_unlock(sc);

                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): ORing in the raw qid looks suspicious -- setting
         * a per-queue enable bit would normally be (1 << qid); compare
         * iwlwifi's iwl_scd_enable_set().  Confirm before changing.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1669
/*
 * Post-"alive" firmware bring-up: reset the ICT table, clear the
 * scheduler's context/translation SRAM, point the scheduler at the
 * host's byte-count tables, enable the command queue and all FH TX DMA
 * channels.  'scd_base_addr' is the scheduler base address reported in
 * the alive notification (0 to skip the cross-check against the PRPH
 * value).  Returns 0 on success or EBUSY/errno on failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        /* Sanity-check the firmware-reported scheduler base address. */
        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        /* (NULL source presumably zero-fills -- see iwm_write_mem) */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate the TX scheduler for all FIFOs. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        /* 'error' is 0 here: the last failing path returned above. */
        return error;
}
1740
1741 /*
1742  * NVM read access and content parsing.  We do not support
1743  * external NVM or writing NVM.
1744  * iwlwifi/mvm/nvm.c
1745  */
1746
/* Default NVM size to read per IWM_NVM_ACCESS_CMD chunk (bytes) */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of iwm_nvm_access_resp) */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1758
/*
 * Read one chunk of NVM 'section' via the IWM_NVM_ACCESS_CMD firmware
 * command.  Up to 'length' bytes starting at 'offset' (within the
 * section) are copied to data + offset, and *len is set to the number
 * of bytes actually read.  A NOT_VALID_ADDRESS status on a non-zero
 * offset is treated as end-of-section: *len is set to 0 and 0 is
 * returned.  Returns 0 on success or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        /* ret first holds the firmware status, then becomes the errno. */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* The device must echo back the offset we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        /* Never accept more data than we requested room for. */
        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        iwm_free_resp(sc, &cmd);
        return ret;
}
1845
1846 /*
1847  * Reads an NVM section completely.
1848  * NICs prior to 7000 family don't have a real NVM, but just read
1849  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1850  * by uCode, we need to manually check in this case that we don't
1851  * overflow and try to read more than the EEPROM size.
1852  * For 7000 family NICs, we supply the maximal size we can read, and
1853  * the uCode fills the response with as much data as we can,
1854  * without overflowing, so no check is needed.
1855  */
1856 static int
1857 iwm_nvm_read_section(struct iwm_softc *sc,
1858         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1859 {
1860         uint16_t seglen, length, offset = 0;
1861         int ret;
1862
1863         /* Set nvm section read length */
1864         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1865
1866         seglen = length;
1867
1868         /* Read the NVM until exhausted (reading less than requested) */
1869         while (seglen == length) {
1870                 /* Check no memory assumptions fail and cause an overflow */
1871                 if ((size_read + offset + length) >
1872                     sc->cfg->eeprom_size) {
1873                         device_printf(sc->sc_dev,
1874                             "EEPROM size is too small for NVM\n");
1875                         return ENOBUFS;
1876                 }
1877
1878                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1879                 if (ret) {
1880                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1881                                     "Cannot read NVM from section %d offset %d, length %d\n",
1882                                     section, offset, length);
1883                         return ret;
1884                 }
1885                 offset += seglen;
1886         }
1887
1888         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1889                     "NVM section %d read completed\n", section);
1890         *len = offset;
1891         return 0;
1892 }
1893
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        /* the following are relative to the start of the SW section */
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1911
/* NVM offsets (in words) for family-8000 devices */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        /* offset into the MAC-address-override section */
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM regulatory section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1937
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1945
/*
 * radio config bits (actual values from NVM definition)
 *
 * Arguments are fully parenthesized so the macros also work when
 * invoked with compound expressions (e.g. `a | b`).
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)           /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)   /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)   /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)   /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)   /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)   /* bits 12-15 */

/* family-8000 layout: wider type field, flavor instead of pnum */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       ((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         (((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         (((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         (((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       (((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       (((x) >> 28) & 0xF)

/* Default maximum TX power used when the NVM provides no limit */
#define DEFAULT_MAX_TX_POWER 16
1962
1963 /**
1964  * enum iwm_nvm_channel_flags - channel flags in NVM
1965  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1966  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1967  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1968  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1969  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1970  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1971  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1972  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1973  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1974  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1975  */
1976 enum iwm_nvm_channel_flags {
1977         IWM_NVM_CHANNEL_VALID = (1 << 0),
1978         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1979         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1980         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1981         IWM_NVM_CHANNEL_DFS = (1 << 7),
1982         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1983         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1984         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1985         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1986 };
1987
1988 /*
1989  * Translate EEPROM flags to net80211.
1990  */
1991 static uint32_t
1992 iwm_eeprom_channel_flags(uint16_t ch_flags)
1993 {
1994         uint32_t nflags;
1995
1996         nflags = 0;
1997         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1998                 nflags |= IEEE80211_CHAN_PASSIVE;
1999         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2000                 nflags |= IEEE80211_CHAN_NOADHOC;
2001         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2002                 nflags |= IEEE80211_CHAN_DFS;
2003                 /* Just in case. */
2004                 nflags |= IEEE80211_CHAN_NOADHOC;
2005         }
2006
2007         return (nflags);
2008 }
2009
2010 static void
2011 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2012     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2013     const uint8_t bands[])
2014 {
2015         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2016         uint32_t nflags;
2017         uint16_t ch_flags;
2018         uint8_t ieee;
2019         int error;
2020
2021         for (; ch_idx < ch_num; ch_idx++) {
2022                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2023                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2024                         ieee = iwm_nvm_channels[ch_idx];
2025                 else
2026                         ieee = iwm_nvm_channels_8000[ch_idx];
2027
2028                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2029                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2030                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2031                             ieee, ch_flags,
2032                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2033                             "5.2" : "2.4");
2034                         continue;
2035                 }
2036
2037                 nflags = iwm_eeprom_channel_flags(ch_flags);
2038                 error = ieee80211_add_channel(chans, maxchans, nchans,
2039                     ieee, 0, 0, nflags, bands);
2040                 if (error != 0)
2041                         break;
2042
2043                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2044                     "Ch. %d Flags %x [%sGHz] - Added\n",
2045                     ieee, ch_flags,
2046                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2047                     "5.2" : "2.4");
2048         }
2049 }
2050
2051 static void
2052 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2053     struct ieee80211_channel chans[])
2054 {
2055         struct iwm_softc *sc = ic->ic_softc;
2056         struct iwm_nvm_data *data = sc->nvm_data;
2057         uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2058         size_t ch_num;
2059
2060         memset(bands, 0, sizeof(bands));
2061         /* 1-13: 11b/g channels. */
2062         setbit(bands, IEEE80211_MODE_11B);
2063         setbit(bands, IEEE80211_MODE_11G);
2064         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2065             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2066
2067         /* 14: 11b channel only. */
2068         clrbit(bands, IEEE80211_MODE_11G);
2069         iwm_add_channel_band(sc, chans, maxchans, nchans,
2070             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2071
2072         if (data->sku_cap_band_52GHz_enable) {
2073                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2074                         ch_num = nitems(iwm_nvm_channels);
2075                 else
2076                         ch_num = nitems(iwm_nvm_channels_8000);
2077                 memset(bands, 0, sizeof(bands));
2078                 setbit(bands, IEEE80211_MODE_11A);
2079                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2080                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2081         }
2082 }
2083
/*
 * Derive the MAC address on family-8000 devices.  Preference order:
 * the NVM MAC-address-override section (if present and valid), then
 * the WFMP MAC address PRPH registers, otherwise zero the address and
 * complain.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Sentinel value meaning "no real override programmed". */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* Unscramble the byte order used by the registers. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        /* Nothing usable: leave an all-zero (invalid) address. */
        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2142
2143 static int
2144 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2145             const uint16_t *phy_sku)
2146 {
2147         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2148                 return le16_to_cpup(nvm_sw + IWM_SKU);
2149
2150         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2151 }
2152
2153 static int
2154 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2155 {
2156         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2157                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2158         else
2159                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2160                                                 IWM_NVM_VERSION_8000));
2161 }
2162
2163 static int
2164 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2165                   const uint16_t *phy_sku)
2166 {
2167         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2168                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2169
2170         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2171 }
2172
2173 static int
2174 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2175 {
2176         int n_hw_addr;
2177
2178         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2179                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2180
2181         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2182
2183         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2184 }
2185
2186 static void
2187 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2188                   uint32_t radio_cfg)
2189 {
2190         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2191                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2192                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2193                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2194                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2195                 return;
2196         }
2197
2198         /* set the radio configuration for family 8000 */
2199         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2200         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2201         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2202         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2203         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2204         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2205 }
2206
/*
 * Fill in data->hw_addr.  Pre-8000 families read it straight from the
 * NVM HW section (stored 16-bit little endian, i.e. byte order 214365);
 * family 8000 goes through the override/WFMP logic.  Returns 0 on
 * success or EINVAL if no valid MAC address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2237
2238 static struct iwm_nvm_data *
2239 iwm_parse_nvm_data(struct iwm_softc *sc,
2240                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2241                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2242                    const uint16_t *phy_sku, const uint16_t *regulatory)
2243 {
2244         struct iwm_nvm_data *data;
2245         uint32_t sku, radio_cfg;
2246         uint16_t lar_config;
2247
2248         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2249                 data = kmalloc(sizeof(*data) +
2250                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2251                     M_DEVBUF, M_WAITOK | M_ZERO);
2252         } else {
2253                 data = kmalloc(sizeof(*data) +
2254                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2255                     M_DEVBUF, M_WAITOK | M_ZERO);
2256         }
2257         if (!data)
2258                 return NULL;
2259
2260         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2261
2262         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2263         iwm_set_radio_cfg(sc, data, radio_cfg);
2264
2265         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2266         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2267         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2268         data->sku_cap_11n_enable = 0;
2269
2270         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2271
2272         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2273                 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2274                                        IWM_NVM_LAR_OFFSET_8000_OLD :
2275                                        IWM_NVM_LAR_OFFSET_8000;
2276
2277                 lar_config = le16_to_cpup(regulatory + lar_offset);
2278                 data->lar_enabled = !!(lar_config &
2279                                        IWM_NVM_LAR_ENABLED_8000);
2280         }
2281
2282         /* If no valid mac address was found - bail out */
2283         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2284                 kfree(data, M_DEVBUF);
2285                 return NULL;
2286         }
2287
2288         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2289                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2290                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2291         } else {
2292                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2293                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2294         }
2295
2296         return data;
2297 }
2298
2299 static void
2300 iwm_free_nvm_data(struct iwm_nvm_data *data)
2301 {
2302         if (data != NULL)
2303                 kfree(data, M_DEVBUF);
2304 }
2305
2306 static struct iwm_nvm_data *
2307 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2308 {
2309         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2310
2311         /* Checking for required sections */
2312         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2313                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2314                     !sections[sc->cfg->nvm_hw_section_num].data) {
2315                         device_printf(sc->sc_dev,
2316                             "Can't parse empty OTP/NVM sections\n");
2317                         return NULL;
2318                 }
2319         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2320                 /* SW and REGULATORY sections are mandatory */
2321                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2322                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2323                         device_printf(sc->sc_dev,
2324                             "Can't parse empty OTP/NVM sections\n");
2325                         return NULL;
2326                 }
2327                 /* MAC_OVERRIDE or at least HW section must exist */
2328                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2329                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2330                         device_printf(sc->sc_dev,
2331                             "Can't parse mac_address, empty sections\n");
2332                         return NULL;
2333                 }
2334
2335                 /* PHY_SKU section is mandatory in B0 */
2336                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2337                         device_printf(sc->sc_dev,
2338                             "Can't parse phy_sku in B0, empty sections\n");
2339                         return NULL;
2340                 }
2341         } else {
2342                 panic("unknown device family %d\n", sc->cfg->device_family);
2343         }
2344
2345         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2346         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2347         calib = (const uint16_t *)
2348             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2349         regulatory = (const uint16_t *)
2350             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2351         mac_override = (const uint16_t *)
2352             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2353         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2354
2355         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2356             phy_sku, regulatory);
2357 }
2358
2359 static int
2360 iwm_nvm_init(struct iwm_softc *sc)
2361 {
2362         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2363         int i, ret, section;
2364         uint32_t size_read = 0;
2365         uint8_t *nvm_buffer, *temp;
2366         uint16_t len;
2367
2368         memset(nvm_sections, 0, sizeof(nvm_sections));
2369
2370         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2371                 return EINVAL;
2372
2373         /* load NVM values from nic */
2374         /* Read From FW NVM */
2375         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2376
2377         nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2378             M_INTWAIT | M_ZERO);
2379         if (!nvm_buffer)
2380                 return ENOMEM;
2381         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2382                 /* we override the constness for initial read */
2383                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2384                                            &len, size_read);
2385                 if (ret)
2386                         continue;
2387                 size_read += len;
2388                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2389                 if (!temp) {
2390                         ret = ENOMEM;
2391                         break;
2392                 }
2393                 memcpy(temp, nvm_buffer, len);
2394
2395                 nvm_sections[section].data = temp;
2396                 nvm_sections[section].length = len;
2397         }
2398         if (!size_read)
2399                 device_printf(sc->sc_dev, "OTP is blank\n");
2400         kfree(nvm_buffer, M_DEVBUF);
2401
2402         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2403         if (!sc->nvm_data)
2404                 return EINVAL;
2405         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2406                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2407
2408         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2409                 if (nvm_sections[i].data != NULL)
2410                         kfree(nvm_sections[i].data, M_DEVBUF);
2411         }
2412
2413         return 0;
2414 }
2415
/*
 * Copy one firmware section into device memory through the service DMA
 * channel, one bounce-buffer-sized chunk at a time.
 *
 * Returns 0 on success or the error from iwm_pcie_load_firmware_chunk().
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
        const struct iwm_fw_desc *section)
{
        struct iwm_dma_info *dma = &sc->fw_dma;
        uint8_t *v_addr;
        bus_addr_t p_addr;
        uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: [%d] uCode section being loaded...\n",
                    __func__, section_num);

        /* Pre-allocated DMA bounce buffer shared by all chunks. */
        v_addr = dma->vaddr;
        p_addr = dma->paddr;

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                uint32_t copy_size, dst_addr;
                int extended_addr = FALSE;

                /* Last chunk may be shorter than chunk_sz. */
                copy_size = MIN(chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                /*
                 * Destinations inside the extended memory window require
                 * the extended-address bit in LMPM_CHICK for the duration
                 * of the transfer.
                 */
                if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWM_FW_MEM_EXTENDED_END)
                        extended_addr = TRUE;

                if (extended_addr)
                        iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
                                          IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                /* Stage the chunk in host DMA memory, then push it. */
                memcpy(v_addr, (const uint8_t *)section->data + offset,
                    copy_size);
                bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
                ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
                                                   copy_size);

                /* Always restore the address space bit before error checks. */
                if (extended_addr)
                        iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
                                            IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        device_printf(sc->sc_dev,
                            "%s: Could not load the [%d] uCode section\n",
                            __func__, section_num);
                        break;
                }
        }

        return ret;
}
2468
2469 /*
2470  * ucode
2471  */
/*
 * Program the FH service channel to DMA one firmware chunk from host
 * memory (phy_addr) into device memory (dst_addr), then sleep until the
 * interrupt path flags completion via sc_fw_chunk_done.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, ETIMEDOUT
 * if the chunk was never acknowledged.  The register write sequence below
 * is order-sensitive; do not reorder.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
        int ret;

        /* Set elsewhere (interrupt path) once the DMA completes. */
        sc->sc_fw_chunk_done = 0;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Pause the channel while the transfer is described. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        /* Destination address in device SRAM. */
        IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
            dst_addr);

        /* Low bits of the host DMA source address. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
            phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        /* High address bits plus the transfer length. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
            (iwm_get_dma_hi_addr(phy_addr)
             << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        /* Describe a single valid transfer buffer. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
            IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        /* Un-pause: this write starts the DMA. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        iwm_nic_unlock(sc);

        /* wait up to 5s for this segment to load */
        ret = 0;
        while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
                ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
                ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
                if (ret)
                        break;
        }

        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "fw chunk addr 0x%x len %d failed to load\n",
                    dst_addr, byte_cnt);
                return ETIMEDOUT;
        }

        return 0;
}
2529
/*
 * Load the firmware sections destined for one CPU on 8000-family devices,
 * reporting each loaded section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register.
 *
 * *first_ucode_section is the resume cursor between calls: CPU 1 starts at
 * section 0; CPU 2 continues just past the separator where the CPU 1 pass
 * stopped.  On return it holds the index of the last section examined.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
        const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        uint32_t val, last_read_idx = 0;

        /* CPU 1 reports in the low 16 status bits, CPU 2 in the high 16. */
        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->fw_sect[i].data ||
                    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
                    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
                        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                                    "Break since Data not valid or Empty section, sec = %d\n",
                                    i);
                        break;
                }
                ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
                if (ret)
                        return ret;

                /* Notify the ucode of the loaded section number and status */
                if (iwm_nic_lock(sc)) {
                        val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
                        /* sec_num is a growing ones-mask: 1, 3, 7, ... */
                        val = val | (sec_num << shift_param);
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
                        sec_num = (sec_num << 1) | 0x1;
                        iwm_nic_unlock(sc);
                }
        }

        *first_ucode_section = last_read_idx;

        iwm_enable_interrupts(sc);

        /* Mark this CPU's half (or both halves for CPU 2) as complete. */
        if (iwm_nic_lock(sc)) {
                if (cpu == 1)
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
                else
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
                iwm_nic_unlock(sc);
        }

        return 0;
}
2591
2592 static int
2593 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2594         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2595 {
2596         int shift_param;
2597         int i, ret = 0;
2598         uint32_t last_read_idx = 0;
2599
2600         if (cpu == 1) {
2601                 shift_param = 0;
2602                 *first_ucode_section = 0;
2603         } else {
2604                 shift_param = 16;
2605                 (*first_ucode_section)++;
2606         }
2607
2608         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2609                 last_read_idx = i;
2610
2611                 /*
2612                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2613                  * CPU1 to CPU2.
2614                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2615                  * CPU2 non paged to CPU2 paging sec.
2616                  */
2617                 if (!image->fw_sect[i].data ||
2618                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2619                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2620                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2621                                     "Break since Data not valid or Empty section, sec = %d\n",
2622                                      i);
2623                         break;
2624                 }
2625
2626                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2627                 if (ret)
2628                         return ret;
2629         }
2630
2631         *first_ucode_section = last_read_idx;
2632
2633         return 0;
2634
2635 }
2636
/*
 * Load a complete (pre-8000) firmware image: CPU 1 sections, then — for
 * dual-CPU images — the CPU 2 header address and sections.  Finally enable
 * interrupts and release the CPU from reset so the ucode starts running.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
        const struct iwm_fw_sects *image)
{
        int ret = 0;
        int first_ucode_section;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* load to FW the binary non secured sections of CPU1 */
        ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
        if (ret)
                return ret;

        if (image->is_dual_cpus) {
                /* set CPU2 header address */
                if (iwm_nic_lock(sc)) {
                        iwm_write_prph(sc,
                                       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
                                       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
                        iwm_nic_unlock(sc);
                }

                /* load to FW the binary sections of CPU2 */
                ret = iwm_pcie_load_cpu_sections(sc, image, 2,
                                                 &first_ucode_section);
                if (ret)
                        return ret;
        }

        iwm_enable_interrupts(sc);

        /* release CPU reset */
        IWM_WRITE(sc, IWM_CSR_RESET, 0);

        return 0;
}
2675
/*
 * Load a complete 8000-family firmware image.  The CPU is released from
 * reset first so the ucode can observe the per-section load-status
 * handshake performed by iwm_pcie_load_cpu_sections_8000().
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
        const struct iwm_fw_sects *image)
{
        int ret = 0;
        int first_ucode_section;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
                    image->is_dual_cpus ? "Dual" : "Single");

        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        if (iwm_nic_lock(sc)) {
                iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
                    IWM_RELEASE_CPU_RESET_BIT);
                iwm_nic_unlock(sc);
        }

        /* load to FW the binary Secured sections of CPU1 */
        ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
            &first_ucode_section);
        if (ret)
                return ret;

        /* load to FW the binary sections of CPU2 */
        return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
            &first_ucode_section);
}
2704
/* XXX Get rid of this definition */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
        /*
         * Restrict the interrupt mask to FH_TX only, the interrupt used to
         * signal firmware-chunk DMA completion during ucode load.
         */
        IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
        sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2713
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and push the given firmware
 * image to it.  Returns 0 on success or an errno; EIO if the hardware is
 * not ready (e.g. AMT owns it).
 */
static int
iwm_start_fw(struct iwm_softc *sc,
        const struct iwm_fw_sects *fw)
{
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwm_prepare_card_hw(sc)) {
                device_printf(sc->sc_dev,
                    "%s: Exit HW not ready\n", __func__);
                ret = EIO;
                goto out;
        }

        /* Ack any pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        iwm_disable_interrupts(sc);

        /* make sure rfkill handshake bits are cleared */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
            IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        ret = iwm_nic_init(sc);
        if (ret) {
                device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupt besides the
         * FH_TX interrupt which is needed to load the firmware). If the
         * RF-Kill switch is toggled, we will find out after having loaded
         * the firmware and return the proper value to the caller.
         */
        iwm_enable_fw_load_int(sc);

        /* really make sure rfkill handshake bits are cleared */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                ret = iwm_pcie_load_given_ucode_8000(sc, fw);
        else
                ret = iwm_pcie_load_given_ucode(sc, fw);

        /* XXX re-check RF-Kill state */

out:
        return ret;
}
2771
2772 static int
2773 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2774 {
2775         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2776                 .valid = htole32(valid_tx_ant),
2777         };
2778
2779         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2780             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2781 }
2782
2783 static int
2784 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2785 {
2786         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2787         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2788
2789         /* Set parameters */
2790         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2791         phy_cfg_cmd.calib_control.event_trigger =
2792             sc->sc_default_calib[ucode_type].event_trigger;
2793         phy_cfg_cmd.calib_control.flow_trigger =
2794             sc->sc_default_calib[ucode_type].flow_trigger;
2795
2796         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2797             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2798         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2799             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2800 }
2801
/*
 * Notification-wait callback for the firmware ALIVE response.
 *
 * Three response layouts exist; they are distinguished purely by payload
 * size.  Each branch records the error/log event table pointers, the
 * scheduler base address, and whether the firmware reported
 * IWM_ALIVE_STATUS_OK.  Always returns TRUE (wait satisfied).
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
        struct iwm_mvm_alive_data *alive_data = data;
        struct iwm_mvm_alive_resp_ver1 *palive1;
        struct iwm_mvm_alive_resp_ver2 *palive2;
        struct iwm_mvm_alive_resp *palive;

        if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                /* VER1 layout: no UMAC, hence no umac error table. */
                palive1 = (void *)pkt->data;

                sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                /* VER2 layout: adds the UMAC error-info address. */
                palive2 = (void *)pkt->data;
                sc->error_event_table =
                        le32toh(palive2->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

                alive_data->valid = le16toh(palive2->status) ==
                                    IWM_ALIVE_STATUS_OK;
                /* A non-zero address implies UMAC logging is available. */
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive2->status), palive2->ver_type,
                            palive2->ver_subtype, palive2->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            palive2->umac_major, palive2->umac_minor);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                /* VER3 layout: UMAC versions become 32-bit fields. */
                palive = (void *)pkt->data;

                sc->error_event_table =
                        le32toh(palive->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive->error_info_addr);

                alive_data->valid = le16toh(palive->status) ==
                                    IWM_ALIVE_STATUS_OK;
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive->status), palive->ver_type,
                            palive->ver_subtype, palive->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            le32toh(palive->umac_major),
                            le32toh(palive->umac_minor));
        }
        /* NOTE(review): an unrecognized payload size is silently ignored. */

        return TRUE;
}
2878
2879 static int
2880 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2881         struct iwm_rx_packet *pkt, void *data)
2882 {
2883         struct iwm_phy_db *phy_db = data;
2884
2885         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2886                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2887                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2888                             __func__, pkt->hdr.code);
2889                 }
2890                 return TRUE;
2891         }
2892
2893         if (iwm_phy_db_set_section(phy_db, pkt)) {
2894                 device_printf(sc->sc_dev,
2895                     "%s: iwm_phy_db_set_section failed\n", __func__);
2896         }
2897
2898         return FALSE;
2899 }
2900
/*
 * Load the requested ucode image, start the firmware, and block until the
 * ALIVE notification arrives.  On success sc->cur_ucode is the new image
 * and sc->ucode_loaded is TRUE; on any failure sc->cur_ucode is restored
 * to the previous image and an errno is returned.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
        enum iwm_ucode_type ucode_type)
{
        struct iwm_notification_wait alive_wait;
        struct iwm_mvm_alive_data alive_data;
        const struct iwm_fw_sects *fw;
        enum iwm_ucode_type old_type = sc->cur_ucode;
        int error;
        static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

        if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
                device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
                        error);
                return error;
        }
        fw = &sc->sc_fw.fw_sects[ucode_type];
        /* Switch now so interrupt handling sees the image being loaded. */
        sc->cur_ucode = ucode_type;
        sc->ucode_loaded = FALSE;

        /* Register the ALIVE waiter before starting the firmware. */
        memset(&alive_data, 0, sizeof(alive_data));
        iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
                                   alive_cmd, NELEM(alive_cmd),
                                   iwm_alive_fn, &alive_data);

        error = iwm_start_fw(sc, fw);
        if (error) {
                device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
                sc->cur_ucode = old_type;
                iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
                return error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        IWM_UNLOCK(sc);
        error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
                                      IWM_MVM_UCODE_ALIVE_TIMEOUT);
        IWM_LOCK(sc);
        if (error) {
                /* On 8000 parts, dump the secure-boot CPU status to help
                 * diagnose the timeout. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                        uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
                        if (iwm_nic_lock(sc)) {
                                a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
                                b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
                                iwm_nic_unlock(sc);
                        }
                        device_printf(sc->sc_dev,
                            "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                            a, b);
                }
                sc->cur_ucode = old_type;
                return error;
        }

        if (!alive_data.valid) {
                device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
                    __func__);
                sc->cur_ucode = old_type;
                return EIO;
        }

        iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

        /*
         * configure and operate fw paging mechanism.
         * driver configures the paging flow only once, CPU2 paging image
         * included in the IWM_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                error = iwm_save_fw_paging(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to save the FW paging image\n",
                            __func__);
                        return error;
                }

                error = iwm_send_paging_cmd(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to send the paging cmd\n", __func__);
                        iwm_free_fw_paging(sc);
                        return error;
                }
        }

        if (!error)
                sc->ucode_loaded = TRUE;
        return error;
}
2994
2995 /*
2996  * mvm misc bits
2997  */
2998
/*
 * Run the INIT ucode.  With justnvm set, only the NVM is read and the MAC
 * address recorded; otherwise the full calibration sequence is performed
 * and the function waits for the init-complete notification.
 *
 * Note the goto discipline: 'error' removes the registered notification
 * waiter before returning, and is deliberately also taken on the
 * successful justnvm path (the waiter must be removed there too).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
        struct iwm_notification_wait calib_wait;
        static const uint16_t init_complete[] = {
                IWM_INIT_COMPLETE_NOTIF,
                IWM_CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        /* do not operate with rfkill switch turned on */
        if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
                device_printf(sc->sc_dev,
                    "radio is disabled by hardware switch\n");
                return EPERM;
        }

        /* Collect PHY DB fragments until init-complete arrives. */
        iwm_init_notification_wait(sc->sc_notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   NELEM(init_complete),
                                   iwm_wait_phy_db_entry,
                                   sc->sc_phy_db);

        /* Will also start the device */
        ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
        if (ret) {
                device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
                    ret);
                goto error;
        }

        if (justnvm) {
                /* Read nvm */
                ret = iwm_nvm_init(sc);
                if (ret) {
                        device_printf(sc->sc_dev, "failed to read nvm\n");
                        goto error;
                }
                IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
                /* Success path: still goes through 'error' to drop the
                 * notification waiter. */
                goto error;
        }

        ret = iwm_send_bt_init_conf(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send bt coex configuration: %d\n", ret);
                goto error;
        }

        /* Send TX valid antennas before triggering calibrations */
        ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send antennas before calibration: %d\n", ret);
                goto error;
        }

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwm_send_phy_cfg_cmd(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "%s: Failed to run INIT calibrations: %d\n",
                    __func__, ret);
                goto error;
        }

        /*
         * Nothing to do but wait for the init complete notification
         * from the firmware.
         */
        IWM_UNLOCK(sc);
        ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
            IWM_MVM_UCODE_CALIB_TIMEOUT);
        IWM_LOCK(sc);


        goto out;

error:
        iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
        return ret;
}
3086
3087 /*
3088  * receive side
3089  */
3090
3091 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	/* Allocate a jumbo cluster mbuf large enough for one RX buffer. */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first, so that a
	 * mapping failure leaves the slot's current buffer untouched.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	/* Release the old buffer's DMA mapping (if the slot was occupied). */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware requires 256-byte alignment; descriptor stores addr >> 8. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3140
3141 /*
3142  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3143  * values are reported by the fw as positive values - need to negate
3144  * to obtain their dBM.  Account for missing antennas by replacing 0
3145  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3146  */
3147 static int
3148 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3149 {
3150         int energy_a, energy_b, energy_c, max_energy;
3151         uint32_t val;
3152
3153         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3154         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3155             IWM_RX_INFO_ENERGY_ANT_A_POS;
3156         energy_a = energy_a ? -energy_a : -256;
3157         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3158             IWM_RX_INFO_ENERGY_ANT_B_POS;
3159         energy_b = energy_b ? -energy_b : -256;
3160         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3161             IWM_RX_INFO_ENERGY_ANT_C_POS;
3162         energy_c = energy_c ? -energy_c : -256;
3163         max_energy = MAX(energy_a, energy_b);
3164         max_energy = MAX(max_energy, energy_c);
3165
3166         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3167             "energy In A %d B %d C %d , and max %d\n",
3168             energy_a, energy_b, energy_c, max_energy);
3169
3170         return max_energy;
3171 }
3172
3173 static void
3174 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3175 {
3176         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3177
3178         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3179
3180         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3181 }
3182
3183 /*
3184  * Retrieve the average noise (in dBm) among receivers.
3185  */
3186 static int
3187 iwm_get_noise(struct iwm_softc *sc,
3188         const struct iwm_mvm_statistics_rx_non_phy *stats)
3189 {
3190         int i, total, nbant, noise;
3191
3192         total = nbant = noise = 0;
3193         for (i = 0; i < 3; i++) {
3194                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3195                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3196                     __func__, i, noise);
3197
3198                 if (noise) {
3199                         total += noise;
3200                         nbant++;
3201                 }
3202         }
3203
3204         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3205             __func__, nbant, total);
3206 #if 0
3207         /* There should be at least one antenna but check anyway. */
3208         return (nbant == 0) ? -127 : (total / nbant) - 107;
3209 #else
3210         /* For now, just hard-code it to -96 to be safe */
3211         return (-96);
3212 #endif
3213 }
3214
3215 /*
3216  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3217  *
3218  * Handles the actual data of the Rx packet from the fw
3219  */
3220 static boolean_t
3221 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3222         boolean_t stolen)
3223 {
3224         struct ieee80211com *ic = &sc->sc_ic;
3225         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3226         struct ieee80211_frame *wh;
3227         struct ieee80211_node *ni;
3228         struct ieee80211_rx_stats rxs;
3229         struct iwm_rx_phy_info *phy_info;
3230         struct iwm_rx_mpdu_res_start *rx_res;
3231         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3232         uint32_t len;
3233         uint32_t rx_pkt_status;
3234         int rssi;
3235
3236         phy_info = &sc->sc_last_phy_info;
3237         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3238         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3239         len = le16toh(rx_res->byte_count);
3240         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3241
3242         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3243                 device_printf(sc->sc_dev,
3244                     "dsp size out of range [0,20]: %d\n",
3245                     phy_info->cfg_phy_cnt);
3246                 return FALSE;
3247         }
3248
3249         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3250             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3251                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3252                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3253                 return FALSE; /* drop */
3254         }
3255
3256         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3257         /* Note: RSSI is absolute (ie a -ve value) */
3258         if (rssi < IWM_MIN_DBM)
3259                 rssi = IWM_MIN_DBM;
3260         else if (rssi > IWM_MAX_DBM)
3261                 rssi = IWM_MAX_DBM;
3262
3263         /* Map it to relative value */
3264         rssi = rssi - sc->sc_noise;
3265
3266         /* replenish ring for the buffer we're going to feed to the sharks */
3267         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3268                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3269                     __func__);
3270                 return FALSE;
3271         }
3272
3273         m->m_data = pkt->data + sizeof(*rx_res);
3274         m->m_pkthdr.len = m->m_len = len;
3275
3276         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3277             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3278
3279         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3280
3281         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3282             "%s: phy_info: channel=%d, flags=0x%08x\n",
3283             __func__,
3284             le16toh(phy_info->channel),
3285             le16toh(phy_info->phy_flags));
3286
3287         /*
3288          * Populate an RX state struct with the provided information.
3289          */
3290         bzero(&rxs, sizeof(rxs));
3291         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3292         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3293         rxs.c_ieee = le16toh(phy_info->channel);
3294         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3295                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3296         } else {
3297                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3298         }
3299         /* rssi is in 1/2db units */
3300         rxs.rssi = rssi * 2;
3301         rxs.nf = sc->sc_noise;
3302
3303         if (ieee80211_radiotap_active_vap(vap)) {
3304                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3305
3306                 tap->wr_flags = 0;
3307                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3308                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3309                 tap->wr_chan_freq = htole16(rxs.c_freq);
3310                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3311                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3312                 tap->wr_dbm_antsignal = (int8_t)rssi;
3313                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3314                 tap->wr_tsft = phy_info->system_timestamp;
3315                 switch (phy_info->rate) {
3316                 /* CCK rates. */
3317                 case  10: tap->wr_rate =   2; break;
3318                 case  20: tap->wr_rate =   4; break;
3319                 case  55: tap->wr_rate =  11; break;
3320                 case 110: tap->wr_rate =  22; break;
3321                 /* OFDM rates. */
3322                 case 0xd: tap->wr_rate =  12; break;
3323                 case 0xf: tap->wr_rate =  18; break;
3324                 case 0x5: tap->wr_rate =  24; break;
3325                 case 0x7: tap->wr_rate =  36; break;
3326                 case 0x9: tap->wr_rate =  48; break;
3327                 case 0xb: tap->wr_rate =  72; break;
3328                 case 0x1: tap->wr_rate =  96; break;
3329                 case 0x3: tap->wr_rate = 108; break;
3330                 /* Unknown rate: should not happen. */
3331                 default:  tap->wr_rate =   0;
3332                 }
3333         }
3334
3335         IWM_UNLOCK(sc);
3336         if (ni != NULL) {
3337                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3338                 ieee80211_input_mimo(ni, m, &rxs);
3339                 ieee80211_free_node(ni);
3340         } else {
3341                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3342                 ieee80211_input_mimo_all(ic, m, &rxs);
3343         }
3344         IWM_LOCK(sc);
3345
3346         return TRUE;
3347 }
3348
3349 static int
3350 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3351         struct iwm_node *in)
3352 {
3353         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3354         struct ieee80211_node *ni = &in->in_ni;
3355         struct ieee80211vap *vap = ni->ni_vap;
3356         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3357         int failack = tx_resp->failure_frame;
3358
3359         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3360
3361         /* Update rate control statistics. */
3362         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3363             __func__,
3364             (int) le16toh(tx_resp->status.status),
3365             (int) le16toh(tx_resp->status.sequence),
3366             tx_resp->frame_count,
3367             tx_resp->bt_kill_count,
3368             tx_resp->failure_rts,
3369             tx_resp->failure_frame,
3370             le32toh(tx_resp->initial_rate),
3371             (int) le16toh(tx_resp->wireless_media_time));
3372
3373         if (status != IWM_TX_STATUS_SUCCESS &&
3374             status != IWM_TX_STATUS_DIRECT_DONE) {
3375                 ieee80211_ratectl_tx_complete(vap, ni,
3376                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3377                 return (1);
3378         } else {
3379                 ieee80211_ratectl_tx_complete(vap, ni,
3380                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3381                 return (0);
3382         }
3383 }
3384
/*
 * Handle a TX-command response: account the completed frame back to its
 * TX ring slot, release its DMA resources, notify net80211, and restart
 * transmission if the queue drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion arrived, so the watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing m/in back to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Passes ownership of m; also drops the node reference. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Resume TX once the ring drains below the low-water mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3424
3425 /*
3426  * transmit side
3427  */
3428
3429 /*
3430  * Process a "command done" firmware notification.  This is where we wakeup
3431  * processes waiting for a synchronous command completion.
3432  * from if_iwn
3433  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Sanity check: with 'queued' outstanding commands, the ack for
	 * the oldest one should be idx such that idx + queued == cur
	 * (mod ring size); anything else means acks were skipped.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KKASSERT(ring->queued > 0);
	ring->queued--;
	/* Last outstanding command: let the NIC drop the cmd-in-flight bit. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3468
#if 0
/*
 * necessary only for block ack mode
 *
 * Currently compiled out: writes the byte count of the frame at (qid, idx)
 * into the TX scheduler's byte-count table so the firmware scheduler can
 * account for it.  Kept for when aggregation support is added.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* Low indices are duplicated past the ring end (hardware wraparound). */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3501
3502 /*
3503  * Take an 802.11 (non-n) rate, find the relevant rate
3504  * table entry.  return the index into in_ridx[].
3505  *
3506  * The caller then uses that index back into in_ridx
3507  * to figure out the rate index programmed /into/
3508  * the firmware for this given node.
3509  */
3510 static int
3511 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3512     uint8_t rate)
3513 {
3514         int i;
3515         uint8_t r;
3516
3517         for (i = 0; i < nitems(in->in_ridx); i++) {
3518                 r = iwm_rates[in->in_ridx[i]].rate;
3519                 if (rate == r)
3520                         return (i);
3521         }
3522         /* XXX Return the first */
3523         /* XXX TODO: have it return the /lowest/ */
3524         return (0);
3525 }
3526
3527 /*
3528  * Fill in the rate related information for a transmit command.
3529  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Tell firmware to use the per-station rate table. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3600
/* Size of the first TX buffer: the leading TB0_SIZE bytes of the command. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on TX ring 'ac'.  Builds the TX
 * command (rate, flags, station id, power-save timeouts), DMA-maps the
 * payload, fills the TFD descriptor, and kicks the ring's write pointer.
 *
 * Called with the IWM lock held.  Consumes (frees) 'm' on error.
 * Returns 0 on success or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Picks the rate and fills the rate-related fields of *tx. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-save frame timeout depends on the management subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	/*
	 * Map the payload; reserve 2 descriptor slots for the command
	 * header + TX command, hence IWM_MAX_SCATTER - 2.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		/* DragonFly's _defrag variant already handled EFBIG. */
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;

	/* TB0: first TB0_SIZE bytes of the command structure. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	/* TB1: remainder of the command + copied 802.11 header + padding. */
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3833
3834 static int
3835 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3836     const struct ieee80211_bpf_params *params)
3837 {
3838         struct ieee80211com *ic = ni->ni_ic;
3839         struct iwm_softc *sc = ic->ic_softc;
3840         int error = 0;
3841
3842         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3843             "->%s begin\n", __func__);
3844
3845         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3846                 m_freem(m);
3847                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3848                     "<-%s not RUNNING\n", __func__);
3849                 return (ENETDOWN);
3850         }
3851
3852         IWM_LOCK(sc);
3853         /* XXX fix this */
3854         if (params == NULL) {
3855                 error = iwm_tx(sc, m, ni, 0);
3856         } else {
3857                 error = iwm_tx(sc, m, ni, 0);
3858         }
3859         sc->sc_tx_timer = 5;
3860         IWM_UNLOCK(sc);
3861
3862         return (error);
3863 }
3864
3865 /*
3866  * mvm/tx.c
3867  */
3868
3869 /*
3870  * Note that there are transports that buffer frames before they reach
3871  * the firmware. This means that after flush_tx_path is called, the
3872  * queue might not be empty. The race-free way to handle this is to:
3873  * 1) set the station as draining
3874  * 2) flush the Tx path
3875  * 3) wait for the transport queues to be empty
3876  */
3877 int
3878 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3879 {
3880         int ret;
3881         struct iwm_tx_path_flush_cmd flush_cmd = {
3882                 .queues_ctl = htole32(tfd_msk),
3883                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3884         };
3885
3886         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3887             sizeof(flush_cmd), &flush_cmd);
3888         if (ret)
3889                 device_printf(sc->sc_dev,
3890                     "Flushing tx queue failed: %d\n", ret);
3891         return ret;
3892 }
3893
3894 static int
3895 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3896 {
3897         struct iwm_time_quota_cmd cmd;
3898         int i, idx, ret, num_active_macs, quota, quota_rem;
3899         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3900         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3901         uint16_t id;
3902
3903         memset(&cmd, 0, sizeof(cmd));
3904
3905         /* currently, PHY ID == binding ID */
3906         if (ivp) {
3907                 id = ivp->phy_ctxt->id;
3908                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3909                 colors[id] = ivp->phy_ctxt->color;
3910
3911                 if (1)
3912                         n_ifs[id] = 1;
3913         }
3914
3915         /*
3916          * The FW's scheduling session consists of
3917          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3918          * equally between all the bindings that require quota
3919          */
3920         num_active_macs = 0;
3921         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3922                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3923                 num_active_macs += n_ifs[i];
3924         }
3925
3926         quota = 0;
3927         quota_rem = 0;
3928         if (num_active_macs) {
3929                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3930                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3931         }
3932
3933         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3934                 if (colors[i] < 0)
3935                         continue;
3936
3937                 cmd.quotas[idx].id_and_color =
3938                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3939
3940                 if (n_ifs[i] <= 0) {
3941                         cmd.quotas[idx].quota = htole32(0);
3942                         cmd.quotas[idx].max_duration = htole32(0);
3943                 } else {
3944                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3945                         cmd.quotas[idx].max_duration = htole32(0);
3946                 }
3947                 idx++;
3948         }
3949
3950         /* Give the remainder of the session to the first binding */
3951         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3952
3953         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3954             sizeof(cmd), &cmd);
3955         if (ret)
3956                 device_printf(sc->sc_dev,
3957                     "%s: Failed to send quota: %d\n", __func__, ret);
3958         return ret;
3959 }
3960
3961 /*
3962  * ieee80211 routines
3963  */
3964
3965 /*
3966  * Change to AUTH state in 80211 state machine.  Roughly matches what
3967  * Linux does in bss_info_changed().
3968  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	/* Not associated yet; set again once RUN state is reached. */
	in->in_assoc = 0;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	/* Program the firmware multicast filter for this vap. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		/* MAC context already known to the firmware: update it. */
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		/* First time: create the MAC context in the firmware. */
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}

	/* Point PHY context 0 at the BSS channel before binding to it. */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	/* Bind the vap (MAC context) to the PHY context set above. */
	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	/* Create the firmware station entry for the AP. */
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* Drop the reference taken on vap->iv_bss at entry. */
	ieee80211_free_node(ni);
	return (error);
}
4090
4091 static int
4092 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4093 {
4094         uint32_t tfd_msk;
4095
4096         /*
4097          * Ok, so *technically* the proper set of calls for going
4098          * from RUN back to SCAN is:
4099          *
4100          * iwm_mvm_power_mac_disable(sc, in);
4101          * iwm_mvm_mac_ctxt_changed(sc, vap);
4102          * iwm_mvm_rm_sta(sc, in);
4103          * iwm_mvm_update_quotas(sc, NULL);
4104          * iwm_mvm_mac_ctxt_changed(sc, in);
4105          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4106          * iwm_mvm_mac_ctxt_remove(sc, in);
4107          *
4108          * However, that freezes the device not matter which permutations
4109          * and modifications are attempted.  Obviously, this driver is missing
4110          * something since it works in the Linux driver, but figuring out what
4111          * is missing is a little more complicated.  Now, since we're going
4112          * back to nothing anyway, we'll just do a complete device reset.
4113          * Up your's, device!
4114          */
4115         /*
4116          * Just using 0xf for the queues mask is fine as long as we only
4117          * get here from RUN state.
4118          */
4119         tfd_msk = 0xf;
4120         mbufq_drain(&sc->sc_snd);
4121         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4122         /*
4123          * We seem to get away with just synchronously sending the
4124          * IWM_TXPATH_FLUSH command.
4125          */
4126 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4127         iwm_stop_device(sc);
4128         iwm_init_hw(sc);
4129         if (in)
4130                 in->in_assoc = 0;
4131         return 0;
4132
4133 #if 0
4134         int error;
4135
4136         iwm_mvm_power_mac_disable(sc, in);
4137
4138         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4139                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4140                 return error;
4141         }
4142
4143         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4144                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4145                 return error;
4146         }
4147         error = iwm_mvm_rm_sta(sc, in);
4148         in->in_assoc = 0;
4149         iwm_mvm_update_quotas(sc, NULL);
4150         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4151                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4152                 return error;
4153         }
4154         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4155
4156         iwm_mvm_mac_ctxt_remove(sc, in);
4157
4158         return error;
4159 #endif
4160 }
4161
4162 static struct ieee80211_node *
4163 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4164 {
4165         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4166             M_INTWAIT | M_ZERO);
4167 }
4168
4169 uint8_t
4170 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4171 {
4172         int i;
4173         uint8_t rval;
4174
4175         for (i = 0; i < rs->rs_nrates; i++) {
4176                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4177                 if (rval == iwm_rates[ridx].rate)
4178                         return rs->rs_rates[i];
4179         }
4180
4181         return 0;
4182 }
4183
/*
 * Build the firmware link-quality (rate selection) command for a node.
 * Populates in->in_ridx[] with HW rate indices (highest rate first)
 * and fills in->in_lq, which the caller sends via IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out if the node's rate count can't fit the LQ table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Unknown rate: leave in_ridx[i] as -1 from memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Refill the antenna mask once exhausted, then round-robin. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4299
4300 static int
4301 iwm_media_change(struct ifnet *ifp)
4302 {
4303         struct ieee80211vap *vap = ifp->if_softc;
4304         struct ieee80211com *ic = vap->iv_ic;
4305         struct iwm_softc *sc = ic->ic_softc;
4306         int error;
4307
4308         error = ieee80211_media_change(ifp);
4309         if (error != ENETRESET)
4310                 return error;
4311
4312         IWM_LOCK(sc);
4313         if (ic->ic_nrunning > 0) {
4314                 iwm_stop(sc);
4315                 iwm_init(sc);
4316         }
4317         IWM_UNLOCK(sc);
4318         return error;
4319 }
4320
4321
/*
 * net80211 state-machine hook.  Performs the driver-side work for the
 * requested transition, then chains to the saved net80211 handler
 * (ivp->iv_newstate).  The net80211 comlock is dropped and the driver
 * lock taken while firmware commands are issued; both are restored to
 * entry state before returning.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop LED blinking when a scan ends (leaving SCAN state). */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: let net80211 transition first
			 * (under its own lock), then reset the device
			 * via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			/*
			 * Back out of AUTH/ASSOC: remove the station,
			 * refresh the MAC context, drop the binding and
			 * PHY context, and update power management.
			 * Errors are logged but the teardown continues.
			 */
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
			ivp->phy_ctxt = NULL;
			error = iwm_mvm_power_update_mac(sc);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update power management\n",
				    __func__);
			}
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* net80211's result wins; firmware errors were logged. */
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		/* iwm_auth() programs MAC/PHY/binding/station for the BSS. */
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		/* Re-enable smart FIFO, beacon filtering, PS and quotas. */
		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Build and push the rate-selection (LQ) table. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the saved net80211 state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4502
4503 void
4504 iwm_endscan_cb(void *arg, int pending)
4505 {
4506         struct iwm_softc *sc = arg;
4507         struct ieee80211com *ic = &sc->sc_ic;
4508
4509         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4510             "%s: scan ended\n",
4511             __func__);
4512
4513         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4514 }
4515
4516 static int
4517 iwm_send_bt_init_conf(struct iwm_softc *sc)
4518 {
4519         struct iwm_bt_coex_cmd bt_cmd;
4520
4521         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4522         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4523
4524         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4525             &bt_cmd);
4526 }
4527
4528 static boolean_t
4529 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4530 {
4531         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4532         boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4533                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4534
4535         if (iwm_lar_disable)
4536                 return FALSE;
4537
4538         /*
4539          * Enable LAR only if it is supported by the FW (TLV) &&
4540          * enabled in the NVM
4541          */
4542         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4543                 return nvm_lar && tlv_lar;
4544         else
4545                 return tlv_lar;
4546 }
4547
4548 static boolean_t
4549 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4550 {
4551         return fw_has_api(&sc->ucode_capa,
4552                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4553                fw_has_capa(&sc->ucode_capa,
4554                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4555 }
4556
/*
 * Send an MCC (mobile country code) update to the firmware for the
 * given two-letter country code.  A no-op (returns 0) when LAR is not
 * supported.  The command length depends on whether the firmware
 * speaks the v2 response format (IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2).
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII country-code letters into a 16-bit MCC. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v2-capable firmware takes the full command, older FW the v1 size. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	/* Debug-only: decode and log the firmware's regulatory response. */
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4629
4630 static void
4631 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4632 {
4633         struct iwm_host_cmd cmd = {
4634                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4635                 .len = { sizeof(uint32_t), },
4636                 .data = { &backoff, },
4637         };
4638
4639         if (iwm_send_cmd(sc, &cmd) != 0) {
4640                 device_printf(sc->sc_dev,
4641                     "failed to change thermal tx backoff\n");
4642         }
4643 }
4644
/*
 * Bring the NIC fully up: start the hardware, run the INIT firmware
 * image once (for NVM/calibration), then restart the hardware and load
 * the regular runtime firmware, followed by the post-alive
 * configuration sequence.  The ordering of the steps below follows the
 * firmware's expectations — do not reorder casually.
 *
 * Returns 0 on success; on failure after the runtime firmware load the
 * device is stopped and an errno value is returned.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image first. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	/* Smart Fifo failure is logged but deliberately non-fatal. */
	error = iwm_mvm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" requests the world regulatory domain as a starting point. */
	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
		goto error;

	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4757
4758 /* Allow multicast from our BSSID. */
4759 static int
4760 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4761 {
4762         struct ieee80211_node *ni = vap->iv_bss;
4763         struct iwm_mcast_filter_cmd *cmd;
4764         size_t size;
4765         int error;
4766
4767         size = roundup(sizeof(*cmd), 4);
4768         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4769         if (cmd == NULL)
4770                 return ENOMEM;
4771         cmd->filter_own = 1;
4772         cmd->port_id = 0;
4773         cmd->count = 0;
4774         cmd->pass_all = 1;
4775         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4776
4777         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4778             IWM_CMD_SYNC, size, cmd);
4779         kfree(cmd, M_DEVBUF);
4780
4781         return (error);
4782 }
4783
4784 /*
4785  * ifnet interfaces
4786  */
4787
4788 static void
4789 iwm_init(struct iwm_softc *sc)
4790 {
4791         int error;
4792
4793         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4794                 return;
4795         }
4796         sc->sc_generation++;
4797         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4798
4799         if ((error = iwm_init_hw(sc)) != 0) {
4800                 kprintf("iwm_init_hw failed %d\n", error);
4801                 iwm_stop(sc);
4802                 return;
4803         }
4804
4805         /*
4806          * Ok, firmware loaded and we are jogging
4807          */
4808         sc->sc_flags |= IWM_FLAG_HW_INITED;
4809         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4810 }
4811
4812 static int
4813 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4814 {
4815         struct iwm_softc *sc;
4816         int error;
4817
4818         sc = ic->ic_softc;
4819
4820         IWM_LOCK(sc);
4821         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4822                 IWM_UNLOCK(sc);
4823                 return (ENXIO);
4824         }
4825         error = mbufq_enqueue(&sc->sc_snd, m);
4826         if (error) {
4827                 IWM_UNLOCK(sc);
4828                 return (error);
4829         }
4830         iwm_start(sc);
4831         IWM_UNLOCK(sc);
4832         return (0);
4833 }
4834
4835 /*
4836  * Dequeue packets from sendq and call send.
4837  */
4838 static void
4839 iwm_start(struct iwm_softc *sc)
4840 {
4841         struct ieee80211_node *ni;
4842         struct mbuf *m;
4843         int ac = 0;
4844
4845         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4846         while (sc->qfullmsk == 0 &&
4847                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4848                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4849                 if (iwm_tx(sc, m, ni, ac) != 0) {
4850                         if_inc_counter(ni->ni_vap->iv_ifp,
4851                             IFCOUNTER_OERRORS, 1);
4852                         ieee80211_free_node(ni);
4853                         continue;
4854                 }
4855                 sc->sc_tx_timer = 15;
4856         }
4857         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4858 }
4859
/*
 * Bring the interface down: mark it stopped, cancel LED blinking and
 * the tx watchdog, and power the device off.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;	/* invalidate callbacks from the old run */
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4872
4873 static void
4874 iwm_watchdog(void *arg)
4875 {
4876         struct iwm_softc *sc = arg;
4877
4878         if (sc->sc_tx_timer > 0) {
4879                 if (--sc->sc_tx_timer == 0) {
4880                         device_printf(sc->sc_dev, "device timeout\n");
4881 #ifdef IWM_DEBUG
4882                         iwm_nic_error(sc);
4883 #endif
4884                         iwm_stop(sc);
4885 #if defined(__DragonFly__)
4886                         ++sc->sc_ic.ic_oerrors;
4887 #else
4888                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4889 #endif
4890                         return;
4891                 }
4892         }
4893         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4894 }
4895
4896 static void
4897 iwm_parent(struct ieee80211com *ic)
4898 {
4899         struct iwm_softc *sc = ic->ic_softc;
4900         int startall = 0;
4901
4902         IWM_LOCK(sc);
4903         if (ic->ic_nrunning > 0) {
4904                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4905                         iwm_init(sc);
4906                         startall = 1;
4907                 }
4908         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4909                 iwm_stop(sc);
4910         IWM_UNLOCK(sc);
4911         if (startall)
4912                 ieee80211_start_all(ic);
4913 }
4914
4915 /*
4916  * The interrupt side of things
4917  */
4918
4919 /*
4920  * error dumping routines are from iwlwifi/mvm/utils.c
4921  */
4922
4923 /*
4924  * Note: This structure is read from the device with IO accesses,
4925  * and the reading already does the endian conversion. As it is
4926  * read with uint32_t-sized accesses, any members with a different size
4927  * need to be ordered correctly though!
4928  */
4929 struct iwm_error_event_table {
4930         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4931         uint32_t error_id;              /* type of error */
4932         uint32_t trm_hw_status0;        /* TRM HW status */
4933         uint32_t trm_hw_status1;        /* TRM HW status */
4934         uint32_t blink2;                /* branch link */
4935         uint32_t ilink1;                /* interrupt link */
4936         uint32_t ilink2;                /* interrupt link */
4937         uint32_t data1;         /* error-specific data */
4938         uint32_t data2;         /* error-specific data */
4939         uint32_t data3;         /* error-specific data */
4940         uint32_t bcon_time;             /* beacon timer */
4941         uint32_t tsf_low;               /* network timestamp function timer */
4942         uint32_t tsf_hi;                /* network timestamp function timer */
4943         uint32_t gp1;           /* GP1 timer register */
4944         uint32_t gp2;           /* GP2 timer register */
4945         uint32_t fw_rev_type;   /* firmware revision type */
4946         uint32_t major;         /* uCode version major */
4947         uint32_t minor;         /* uCode version minor */
4948         uint32_t hw_ver;                /* HW Silicon version */
4949         uint32_t brd_ver;               /* HW board version */
4950         uint32_t log_pc;                /* log program counter */
4951         uint32_t frame_ptr;             /* frame pointer */
4952         uint32_t stack_ptr;             /* stack pointer */
4953         uint32_t hcmd;          /* last host command header */
4954         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4955                                  * rxtx_flag */
4956         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4957                                  * host_flag */
4958         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4959                                  * enc_flag */
4960         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4961                                  * time_flag */
4962         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4963                                  * wico interrupt */
4964         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4965         uint32_t wait_event;            /* wait event() caller address */
4966         uint32_t l2p_control;   /* L2pControlField */
4967         uint32_t l2p_duration;  /* L2pDurationField */
4968         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4969         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4970         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4971                                  * (LMPM_PMG_SEL) */
4972         uint32_t u_timestamp;   /* indicate when the date and time of the
4973                                  * compilation */
4974         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4975 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4976
4977 /*
4978  * UMAC error struct - relevant starting from family 8000 chip.
4979  * Note: This structure is read from the device with IO accesses,
4980  * and the reading already does the endian conversion. As it is
4981  * read with u32-sized accesses, any members with a different size
4982  * need to be ordered correctly though!
4983  */
4984 struct iwm_umac_error_event_table {
4985         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4986         uint32_t error_id;      /* type of error */
4987         uint32_t blink1;        /* branch link */
4988         uint32_t blink2;        /* branch link */
4989         uint32_t ilink1;        /* interrupt link */
4990         uint32_t ilink2;        /* interrupt link */
4991         uint32_t data1;         /* error-specific data */
4992         uint32_t data2;         /* error-specific data */
4993         uint32_t data3;         /* error-specific data */
4994         uint32_t umac_major;
4995         uint32_t umac_minor;
4996         uint32_t frame_pointer; /* core register 27*/
4997         uint32_t stack_pointer; /* core register 28 */
4998         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4999         uint32_t nic_isr_pref;  /* ISR status register */
5000 } __packed;
5001
/*
 * Firmware error-log layout constants: a one-word header precedes the
 * entries, each of which apparently spans seven 32-bit words — used
 * only in the table.valid sanity checks below; confirm against the
 * firmware API if ever relied on further.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5004
5005 #ifdef IWM_DEBUG
/*
 * Mapping of firmware SYSASSERT error IDs to human-readable names,
 * used by iwm_desc_lookup() when dumping the device error log.
 * The final entry ("ADVANCED_SYSASSERT") is the catch-all for
 * unrecognized IDs.
 *
 * Made static const: the table is only referenced within this file
 * (under IWM_DEBUG) and is never written, so it should not have
 * external linkage nor live in writable data.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5027
5028 static const char *
5029 iwm_desc_lookup(uint32_t num)
5030 {
5031         int i;
5032
5033         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5034                 if (advanced_lookup[i].num == num)
5035                         return advanced_lookup[i].name;
5036
5037         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5038         return advanced_lookup[i].name;
5039 }
5040
5041 static void
5042 iwm_nic_umac_error(struct iwm_softc *sc)
5043 {
5044         struct iwm_umac_error_event_table table;
5045         uint32_t base;
5046
5047         base = sc->umac_error_event_table;
5048
5049         if (base < 0x800000) {
5050                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5051                     base);
5052                 return;
5053         }
5054
5055         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5056                 device_printf(sc->sc_dev, "reading errlog failed\n");
5057                 return;
5058         }
5059
5060         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5061                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5062                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5063                     sc->sc_flags, table.valid);
5064         }
5065
5066         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5067                 iwm_desc_lookup(table.error_id));
5068         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5069         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5070         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5071             table.ilink1);
5072         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5073             table.ilink2);
5074         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5075         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5076         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5077         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5078         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5079         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5080             table.frame_pointer);
5081         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5082             table.stack_pointer);
5083         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5084         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5085             table.nic_isr_pref);
5086 }
5087
5088 /*
5089  * Support for dumping the error log seemed like a good idea ...
5090  * but it's mostly hex junk and the only sensible thing is the
5091  * hw/ucode revision (which we know anyway).  Since it's here,
5092  * I'll just leave it in, just in case e.g. the Intel guys want to
5093  * help us decipher some "ADVANCED_SYSASSERT" later.
5094  */
5095 static void
5096 iwm_nic_error(struct iwm_softc *sc)
5097 {
5098         struct iwm_error_event_table table;
5099         uint32_t base;
5100
5101         device_printf(sc->sc_dev, "dumping device error log\n");
5102         base = sc->error_event_table;
5103         if (base < 0x800000) {
5104                 device_printf(sc->sc_dev,
5105                     "Invalid error log pointer 0x%08x\n", base);
5106                 return;
5107         }
5108
5109         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5110                 device_printf(sc->sc_dev, "reading errlog failed\n");
5111                 return;
5112         }
5113
5114         if (!table.valid) {
5115                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5116                 return;
5117         }
5118
5119         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5120                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5121                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5122                     sc->sc_flags, table.valid);
5123         }
5124
5125         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5126             iwm_desc_lookup(table.error_id));
5127         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5128             table.trm_hw_status0);
5129         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5130             table.trm_hw_status1);
5131         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5132         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5133         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5134         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5135         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5136         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5137         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5138         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5139         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5140         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5141         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5142         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5143             table.fw_rev_type);
5144         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5145         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5146         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5147         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5148         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5149         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5150         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5151         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5152         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5153         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5154         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5155         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5156         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5157         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5158         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5159         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5160         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5161         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5162         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5163
5164         if (sc->umac_error_event_table)
5165                 iwm_nic_umac_error(sc);
5166 }
5167 #endif
5168
5169 static void
5170 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5171 {
5172         struct ieee80211com *ic = &sc->sc_ic;
5173         struct iwm_cmd_response *cresp;
5174         struct mbuf *m1;
5175         uint32_t offset = 0;
5176         uint32_t maxoff = IWM_RBUF_SIZE;
5177         uint32_t nextoff;
5178         boolean_t stolen = FALSE;
5179
5180 #define HAVEROOM(a)     \
5181     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5182
5183         while (HAVEROOM(offset)) {
5184                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5185                     offset);
5186                 int qid, idx, code, len;
5187
5188                 qid = pkt->hdr.qid;
5189                 idx = pkt->hdr.idx;
5190
5191                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5192
5193                 /*
5194                  * randomly get these from the firmware, no idea why.
5195                  * they at least seem harmless, so just ignore them for now
5196                  */
5197                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5198                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5199                         break;
5200                 }
5201
5202                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5203                     "rx packet qid=%d idx=%d type=%x\n",
5204                     qid & ~0x80, pkt->hdr.idx, code);
5205
5206                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5207                 len += sizeof(uint32_t); /* account for status word */
5208                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5209
5210                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5211
5212                 switch (code) {
5213                 case IWM_REPLY_RX_PHY_CMD:
5214                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5215                         break;
5216
5217                 case IWM_REPLY_RX_MPDU_CMD: {
5218                         /*
5219                          * If this is the last frame in the RX buffer, we
5220                          * can directly feed the mbuf to the sharks here.
5221                          */
5222                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5223                             struct iwm_rx_packet *, nextoff);
5224                         if (!HAVEROOM(nextoff) ||
5225                             (nextpkt->hdr.code == 0 &&
5226                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5227                              nextpkt->hdr.idx == 0) ||
5228                             (nextpkt->len_n_flags ==
5229                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5230                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5231                                         stolen = FALSE;
5232                                         /* Make sure we abort the loop */
5233                                         nextoff = maxoff;
5234                                 }
5235                                 break;
5236                         }
5237
5238                         /*
5239                          * Use m_copym instead of m_split, because that
5240                          * makes it easier to keep a valid rx buffer in
5241                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5242                          *
5243                          * We need to start m_copym() at offset 0, to get the
5244                          * M_PKTHDR flag preserved.
5245                          */
5246                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5247                         if (m1) {
5248                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5249                                         stolen = TRUE;
5250                                 else
5251                                         m_freem(m1);
5252                         }
5253                         break;
5254                 }
5255
5256                 case IWM_TX_CMD:
5257                         iwm_mvm_rx_tx_cmd(sc, pkt);
5258                         break;
5259
5260                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5261                         struct iwm_missed_beacons_notif *resp;
5262                         int missed;
5263
5264                         /* XXX look at mac_id to determine interface ID */
5265                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5266
5267                         resp = (void *)pkt->data;
5268                         missed = le32toh(resp->consec_missed_beacons);
5269
5270                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5271                             "%s: MISSED_BEACON: mac_id=%d, "
5272                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5273                             "num_rx=%d\n",
5274                             __func__,
5275                             le32toh(resp->mac_id),
5276                             le32toh(resp->consec_missed_beacons_since_last_rx),
5277                             le32toh(resp->consec_missed_beacons),
5278                             le32toh(resp->num_expected_beacons),
5279                             le32toh(resp->num_recvd_beacons));
5280
5281                         /* Be paranoid */
5282                         if (vap == NULL)
5283                                 break;
5284
5285                         /* XXX no net80211 locking? */
5286                         if (vap->iv_state == IEEE80211_S_RUN &&
5287                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5288                                 if (missed > vap->iv_bmissthreshold) {
5289                                         /* XXX bad locking; turn into task */
5290                                         IWM_UNLOCK(sc);
5291                                         ieee80211_beacon_miss(ic);
5292                                         IWM_LOCK(sc);
5293                                 }
5294                         }
5295
5296                         break; }
5297
5298                 case IWM_MFUART_LOAD_NOTIFICATION:
5299                         break;
5300
5301                 case IWM_MVM_ALIVE:
5302                         break;
5303
5304                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5305                         break;
5306
5307                 case IWM_STATISTICS_NOTIFICATION: {
5308                         struct iwm_notif_statistics *stats;
5309                         stats = (void *)pkt->data;
5310                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5311                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5312                         break;
5313                 }
5314
5315                 case IWM_NVM_ACCESS_CMD:
5316                 case IWM_MCC_UPDATE_CMD:
5317                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5318                                 memcpy(sc->sc_cmd_resp,
5319                                     pkt, sizeof(sc->sc_cmd_resp));
5320                         }
5321                         break;
5322
5323                 case IWM_MCC_CHUB_UPDATE_CMD: {
5324                         struct iwm_mcc_chub_notif *notif;
5325                         notif = (void *)pkt->data;
5326
5327                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5328                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5329                         sc->sc_fw_mcc[2] = '\0';
5330                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5331                             "fw source %d sent CC '%s'\n",
5332                             notif->source_id, sc->sc_fw_mcc);
5333                         break;
5334                 }
5335
5336                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5337                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5338                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5339                         struct iwm_dts_measurement_notif_v1 *notif;
5340
5341                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5342                                 device_printf(sc->sc_dev,
5343                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5344                                 break;
5345                         }
5346                         notif = (void *)pkt->data;
5347                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5348                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5349                             notif->temp);
5350                         break;
5351                 }
5352
5353                 case IWM_PHY_CONFIGURATION_CMD:
5354                 case IWM_TX_ANT_CONFIGURATION_CMD:
5355                 case IWM_ADD_STA:
5356                 case IWM_MAC_CONTEXT_CMD:
5357                 case IWM_REPLY_SF_CFG_CMD:
5358                 case IWM_POWER_TABLE_CMD:
5359                 case IWM_PHY_CONTEXT_CMD:
5360                 case IWM_BINDING_CONTEXT_CMD:
5361                 case IWM_TIME_EVENT_CMD:
5362                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5363                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5364                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5365                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5366                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5367                 case IWM_REPLY_BEACON_FILTERING_CMD:
5368                 case IWM_MAC_PM_POWER_TABLE:
5369                 case IWM_TIME_QUOTA_CMD:
5370                 case IWM_REMOVE_STA:
5371                 case IWM_TXPATH_FLUSH:
5372                 case IWM_LQ_CMD:
5373                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5374                                  IWM_FW_PAGING_BLOCK_CMD):
5375                 case IWM_BT_CONFIG:
5376                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5377                         cresp = (void *)pkt->data;
5378                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5379                                 memcpy(sc->sc_cmd_resp,
5380                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5381                         }
5382                         break;
5383
5384                 /* ignore */
5385                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5386                         break;
5387
5388                 case IWM_INIT_COMPLETE_NOTIF:
5389                         break;
5390
5391                 case IWM_SCAN_OFFLOAD_COMPLETE:
5392                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5393                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5394                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5395                                 ieee80211_runtask(ic, &sc->sc_es_task);
5396                         }
5397                         break;
5398
5399                 case IWM_SCAN_ITERATION_COMPLETE: {
5400                         struct iwm_lmac_scan_complete_notif *notif;
5401                         notif = (void *)pkt->data;
5402                         break;
5403                 }
5404
5405                 case IWM_SCAN_COMPLETE_UMAC:
5406                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5407                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5408                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5409                                 ieee80211_runtask(ic, &sc->sc_es_task);
5410                         }
5411                         break;
5412
5413                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5414                         struct iwm_umac_scan_iter_complete_notif *notif;
5415                         notif = (void *)pkt->data;
5416
5417                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5418                             "complete, status=0x%x, %d channels scanned\n",
5419                             notif->status, notif->scanned_channels);
5420                         break;
5421                 }
5422
5423                 case IWM_REPLY_ERROR: {
5424                         struct iwm_error_resp *resp;
5425                         resp = (void *)pkt->data;
5426
5427                         device_printf(sc->sc_dev,
5428                             "firmware error 0x%x, cmd 0x%x\n",
5429                             le32toh(resp->error_type),
5430                             resp->cmd_id);
5431                         break;
5432                 }
5433
5434                 case IWM_TIME_EVENT_NOTIFICATION: {
5435                         struct iwm_time_event_notif *notif;
5436                         notif = (void *)pkt->data;
5437
5438                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5439                             "TE notif status = 0x%x action = 0x%x\n",
5440                             notif->status, notif->action);
5441                         break;
5442                 }
5443
5444                 case IWM_MCAST_FILTER_CMD:
5445                         break;
5446
5447                 case IWM_SCD_QUEUE_CFG: {
5448                         struct iwm_scd_txq_cfg_rsp *rsp;
5449                         rsp = (void *)pkt->data;
5450
5451                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5452                             "queue cfg token=0x%x sta_id=%d "
5453                             "tid=%d scd_queue=%d\n",
5454                             rsp->token, rsp->sta_id, rsp->tid,
5455                             rsp->scd_queue);
5456                         break;
5457                 }
5458
5459                 default:
5460                         device_printf(sc->sc_dev,
5461                             "frame %d/%d %x UNHANDLED (this should "
5462                             "not happen)\n", qid & ~0x80, idx,
5463                             pkt->len_n_flags);
5464                         break;
5465                 }
5466
5467                 /*
5468                  * Why test bit 0x80?  The Linux driver:
5469                  *
5470                  * There is one exception:  uCode sets bit 15 when it
5471                  * originates the response/notification, i.e. when the
5472                  * response/notification is not a direct response to a
5473                  * command sent by the driver.  For example, uCode issues
5474                  * IWM_REPLY_RX when it sends a received frame to the driver;
5475                  * it is not a direct response to any driver command.
5476                  *
5477                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5478                  * uses a slightly different format for pkt->hdr, and "qid"
5479                  * is actually the upper byte of a two-byte field.
5480                  */
5481                 if (!(qid & (1 << 7)))
5482                         iwm_cmd_done(sc, pkt);
5483
5484                 offset = nextoff;
5485         }
5486         if (stolen)
5487                 m_freem(m);
5488 #undef HAVEROOM
5489 }
5490
5491 /*
5492  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5493  * Basic structure from if_iwn
5494  */
5495 static void
5496 iwm_notif_intr(struct iwm_softc *sc)
5497 {
5498         uint16_t hw;
5499
5500         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5501             BUS_DMASYNC_POSTREAD);
5502
5503         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5504
5505         /*
5506          * Process responses
5507          */
5508         while (sc->rxq.cur != hw) {
5509                 struct iwm_rx_ring *ring = &sc->rxq;
5510                 struct iwm_rx_data *data = &ring->data[ring->cur];
5511
5512                 bus_dmamap_sync(ring->data_dmat, data->map,
5513                     BUS_DMASYNC_POSTREAD);
5514
5515                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5516                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5517                 iwm_handle_rxb(sc, data->m);
5518
5519                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5520         }
5521
5522         /*
5523          * Tell the firmware that it can reuse the ring entries that
5524          * we have just processed.
5525          * Seems like the hardware gets upset unless we align
5526          * the write by 8??
5527          */
5528         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5529         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5530 }
5531
/*
 * Primary interrupt handler.
 *
 * Masks further interrupts, collects the pending interrupt causes
 * (from the ICT table when IWM_FLAG_USE_ICT is set, otherwise straight
 * from the INT/FH_INT_STATUS CSRs), acknowledges them, and dispatches:
 * SW error -> dump driver state and restart all vaps; HW error or
 * rfkill -> stop the device; FH_TX -> wake the firmware-chunk loader;
 * RX/periodic -> iwm_notif_intr().  Interrupts are re-enabled on every
 * exit path except the fatal-error ones.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;	/* XXX rv is written below but never read */
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* iwm_pci_detach() clears sc_mem; bail if the device is gone. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Disable interrupts while servicing this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* nothing pending: spurious */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* hand the slot back */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Spread the ICT byte-wise causes into CSR_INT bit layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* spurious interrupt; just re-enable */
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* NB: called without the driver lock held. */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;	/* fatal: leave interrupts disabled */
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the firmware-load path sleeping on sc->sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5691
5692 /*
5693  * Autoconf glue-sniffing
5694  */
5695 #define PCI_VENDOR_INTEL                0x8086
5696 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5697 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5698 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5699 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5700 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5701 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5702 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5703 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5704 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5705 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5706
5707 static const struct iwm_devices {
5708         uint16_t                device;
5709         const struct iwm_cfg    *cfg;
5710 } iwm_devices[] = {
5711         { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5712         { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5713         { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5714         { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5715         { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5716         { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5717         { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5718         { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5719         { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5720         { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5721 };
5722
5723 static int
5724 iwm_probe(device_t dev)
5725 {
5726         int i;
5727
5728         for (i = 0; i < nitems(iwm_devices); i++) {
5729                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5730                     pci_get_device(dev) == iwm_devices[i].device) {
5731                         device_set_desc(dev, iwm_devices[i].cfg->name);
5732                         return (BUS_PROBE_DEFAULT);
5733                 }
5734         }
5735
5736         return (ENXIO);
5737 }
5738
5739 static int
5740 iwm_dev_check(device_t dev)
5741 {
5742         struct iwm_softc *sc;
5743         uint16_t devid;
5744         int i;
5745
5746         sc = device_get_softc(dev);
5747
5748         devid = pci_get_device(dev);
5749         for (i = 0; i < NELEM(iwm_devices); i++) {
5750                 if (iwm_devices[i].device == devid) {
5751                         sc->cfg = iwm_devices[i].cfg;
5752                         return (0);
5753                 }
5754         }
5755         device_printf(dev, "unknown adapter type\n");
5756         return ENXIO;
5757 }
5758
5759 /* PCI registers */
5760 #define PCI_CFG_RETRY_TIMEOUT   0x041
5761
5762 static int
5763 iwm_pci_attach(device_t dev)
5764 {
5765         struct iwm_softc *sc;
5766         int count, error, rid;
5767         uint16_t reg;
5768 #if defined(__DragonFly__)
5769         int irq_flags;
5770 #endif
5771
5772         sc = device_get_softc(dev);
5773
5774         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5775          * PCI Tx retries from interfering with C3 CPU state */
5776         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5777
5778         /* Enable bus-mastering and hardware bug workaround. */
5779         pci_enable_busmaster(dev);
5780         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5781         /* if !MSI */
5782         if (reg & PCIM_STATUS_INTxSTATE) {
5783                 reg &= ~PCIM_STATUS_INTxSTATE;
5784         }
5785         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5786
5787         rid = PCIR_BAR(0);
5788         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5789             RF_ACTIVE);
5790         if (sc->sc_mem == NULL) {
5791                 device_printf(sc->sc_dev, "can't map mem space\n");
5792                 return (ENXIO);
5793         }
5794         sc->sc_st = rman_get_bustag(sc->sc_mem);
5795         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5796
5797         /* Install interrupt handler. */
5798         count = 1;
5799         rid = 0;
5800 #if defined(__DragonFly__)
5801         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5802         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5803 #else
5804         if (pci_alloc_msi(dev, &count) == 0)
5805                 rid = 1;
5806         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5807             (rid != 0 ? 0 : RF_SHAREABLE));
5808 #endif
5809         if (sc->sc_irq == NULL) {
5810                 device_printf(dev, "can't map interrupt\n");
5811                         return (ENXIO);
5812         }
5813 #if defined(__DragonFly__)
5814         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5815                                iwm_intr, sc, &sc->sc_ih,
5816                                &wlan_global_serializer);
5817 #else
5818         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5819             NULL, iwm_intr, sc, &sc->sc_ih);
5820 #endif
5821         if (sc->sc_ih == NULL) {
5822                 device_printf(dev, "can't establish interrupt");
5823 #if defined(__DragonFly__)
5824                 pci_release_msi(dev);
5825 #endif
5826                         return (ENXIO);
5827         }
5828         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5829
5830         return (0);
5831 }
5832
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, release the
 * IRQ resource and any MSI message, and unmap the register BAR.
 * Safe to call with partially-initialized resources (NULL checks).
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		/* iwm_intr() checks sc_mem == NULL to detect detach. */
		sc->sc_mem = NULL;
#endif
	}
}
5855
5856
5857
/*
 * Newbus attach: initialize locks/callouts, allocate every DMA
 * resource (firmware area, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings), read and normalize the hardware revision, and register
 * a config intrhook (iwm_preinit) that finishes firmware and net80211
 * setup once interrupts are running.  On any failure everything is
 * unwound through iwm_detach_local() and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response is outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* PHY contexts start out unreferenced with no channel assigned. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Defer firmware load and net80211 attachment until interrupts
	 * are available; iwm_preinit() does the rest.
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6065
6066 static int
6067 iwm_is_valid_ether_addr(uint8_t *addr)
6068 {
6069         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6070
6071         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6072                 return (FALSE);
6073
6074         return (TRUE);
6075 }
6076
/*
 * net80211 WME parameter update callback.
 *
 * Snapshots the current channel WME parameters under the 802.11 lock,
 * converts them into the vap's queue_params[] (EDCA form) under the
 * driver lock and, if the MAC context is uploaded and the station is
 * associated, pushes the change to the firmware.  Always returns 0.
 */
static int
iwm_wme_update(struct ieee80211com *ic)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwm_softc *sc = ic->ic_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	/* NOTE(review): IWM_VAP() applied before the NULL check below —
	 * assumed to be a plain cast macro; verify it does not deref. */
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct iwm_node *in;
	struct wmeParams tmp[WME_NUM_AC];
	int aci, error;

	if (vap == NULL)
		return (0);

	/* Copy the parameters out under the net80211 lock... */
	IEEE80211_LOCK(ic);
	for (aci = 0; aci < WME_NUM_AC; aci++)
		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
	IEEE80211_UNLOCK(ic);

	/* ...then convert them under the driver lock. */
	IWM_LOCK(sc);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac = &tmp[aci];
		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
		ivp->queue_params[aci].edca_txop =
		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
	}
	ivp->have_wme = TRUE;
	/* Only bother the firmware once the MAC context is live and
	 * we are associated. */
	if (ivp->is_uploaded && vap->iv_bss != NULL) {
		in = IWM_NODE(vap->iv_bss);
		if (in->in_assoc) {
			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update MAC\n", __func__);
			}
		}
	}
	IWM_UNLOCK(sc);

	return (0);
#undef IWM_EXP2
}
6120
/*
 * Deferred (config_intrhook) second stage of attach.
 *
 * Runs once interrupts work: starts the hardware, runs the "init"
 * firmware to obtain NVM data (MAC address, band capabilities), then
 * completes net80211 attachment by installing all the ic_* callbacks.
 * On failure the whole attach is unwound via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware just to read the NVM, then stop again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6191
6192 /*
6193  * Attach the interface to 802.11 radiotap.
6194  */
6195 static void
6196 iwm_radiotap_attach(struct iwm_softc *sc)
6197 {
6198         struct ieee80211com *ic = &sc->sc_ic;
6199
6200         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6201             "->%s begin\n", __func__);
6202         ieee80211_radiotap_attach(ic,
6203             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6204                 IWM_TX_RADIOTAP_PRESENT,
6205             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6206                 IWM_RX_RADIOTAP_PRESENT);
6207         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6208             "->%s end\n", __func__);
6209 }
6210
6211 static struct ieee80211vap *
6212 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6213     enum ieee80211_opmode opmode, int flags,
6214     const uint8_t bssid[IEEE80211_ADDR_LEN],
6215     const uint8_t mac[IEEE80211_ADDR_LEN])
6216 {
6217         struct iwm_vap *ivp;
6218         struct ieee80211vap *vap;
6219
6220         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6221                 return NULL;
6222         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6223         vap = &ivp->iv_vap;
6224         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6225         vap->iv_bmissthreshold = 10;            /* override default */
6226         /* Override with driver methods. */
6227         ivp->iv_newstate = vap->iv_newstate;
6228         vap->iv_newstate = iwm_newstate;
6229
6230         ivp->id = IWM_DEFAULT_MACID;
6231         ivp->color = IWM_DEFAULT_COLOR;
6232
6233         ivp->have_wme = FALSE;
6234         ivp->ps_disabled = FALSE;
6235
6236         ieee80211_ratectl_init(vap);
6237         /* Complete setup. */
6238         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6239             mac);
6240         ic->ic_opmode = opmode;
6241
6242         return vap;
6243 }
6244
6245 static void
6246 iwm_vap_delete(struct ieee80211vap *vap)
6247 {
6248         struct iwm_vap *ivp = IWM_VAP(vap);
6249
6250         ieee80211_ratectl_deinit(vap);
6251         ieee80211_vap_detach(vap);
6252         kfree(ivp, M_80211_VAP);
6253 }
6254
6255 static void
6256 iwm_scan_start(struct ieee80211com *ic)
6257 {
6258         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6259         struct iwm_softc *sc = ic->ic_softc;
6260         int error;
6261
6262         IWM_LOCK(sc);
6263         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6264                 /* This should not be possible */
6265                 device_printf(sc->sc_dev,
6266                     "%s: Previous scan not completed yet\n", __func__);
6267         }
6268         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6269                 error = iwm_mvm_umac_scan(sc);
6270         else
6271                 error = iwm_mvm_lmac_scan(sc);
6272         if (error != 0) {
6273                 device_printf(sc->sc_dev, "could not initiate scan\n");
6274                 IWM_UNLOCK(sc);
6275                 ieee80211_cancel_scan(vap);
6276         } else {
6277                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6278                 iwm_led_blink_start(sc);
6279                 IWM_UNLOCK(sc);
6280         }
6281 }
6282
/*
 * net80211 scan-end callback: stop the firmware scan, restore the LED
 * state, and cancel any stale end-of-scan task.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* If we are associated, leave the LED solid on. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6311
/*
 * net80211 multicast-update callback; intentionally a no-op, the
 * firmware manages multicast filtering itself.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6316
/*
 * net80211 set-channel callback; intentionally a no-op, channel changes
 * are driven through firmware commands instead.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6321
/*
 * net80211 per-channel scan callback; intentionally a no-op, the
 * firmware steps through the channel list on its own.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6326
/*
 * net80211 minimum-dwell callback; intentionally a no-op like the other
 * scan callbacks above, since the firmware controls dwell times.
 * (The redundant trailing "return;" was dead code and has been removed.)
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6332
6333 void
6334 iwm_init_task(void *arg1)
6335 {
6336         struct iwm_softc *sc = arg1;
6337
6338         IWM_LOCK(sc);
6339         while (sc->sc_flags & IWM_FLAG_BUSY) {
6340 #if defined(__DragonFly__)
6341                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6342 #else
6343                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6344 #endif
6345 }
6346         sc->sc_flags |= IWM_FLAG_BUSY;
6347         iwm_stop(sc);
6348         if (sc->sc_ic.ic_nrunning > 0)
6349                 iwm_init(sc);
6350         sc->sc_flags &= ~IWM_FLAG_BUSY;
6351         wakeup(&sc->sc_flags);
6352         IWM_UNLOCK(sc);
6353 }
6354
6355 static int
6356 iwm_resume(device_t dev)
6357 {
6358         struct iwm_softc *sc = device_get_softc(dev);
6359         int do_reinit = 0;
6360
6361         /*
6362          * We disable the RETRY_TIMEOUT register (0x41) to keep
6363          * PCI Tx retries from interfering with C3 CPU state.
6364          */
6365         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6366         iwm_init_task(device_get_softc(dev));
6367
6368         IWM_LOCK(sc);
6369         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6370                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6371                 do_reinit = 1;
6372         }
6373         IWM_UNLOCK(sc);
6374
6375         if (do_reinit)
6376                 ieee80211_resume_all(&sc->sc_ic);
6377
6378         return 0;
6379 }
6380
6381 static int
6382 iwm_suspend(device_t dev)
6383 {
6384         int do_stop = 0;
6385         struct iwm_softc *sc = device_get_softc(dev);
6386
6387         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6388
6389         ieee80211_suspend_all(&sc->sc_ic);
6390
6391         if (do_stop) {
6392                 IWM_LOCK(sc);
6393                 iwm_stop(sc);
6394                 sc->sc_flags |= IWM_FLAG_SCANNING;
6395                 IWM_UNLOCK(sc);
6396         }
6397
6398         return (0);
6399 }
6400
/*
 * Common teardown path, used by both device_detach and attach-failure
 * cleanup.  'do_net80211' selects whether the net80211 state is torn
 * down as well (attach failure may bail out before ifattach).
 *
 * The sequence is strictly ordered: drain tasks/callouts first, stop
 * the device, detach net80211, then release memory and DMA resources,
 * and finally detach from PCI and destroy the lock.
 *
 * Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Idempotent: only tear down once. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6456
6457 static int
6458 iwm_detach(device_t dev)
6459 {
6460         struct iwm_softc *sc = device_get_softc(dev);
6461
6462         return (iwm_detach_local(sc, 1));
6463 }
6464
/* newbus method table: probe/attach/detach plus power management. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

/* Driver declaration for the PCI bus. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register with the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);