if_iwm - Recognize IWM_FW_PAGING_BLOCK_CMD wide cmd response correctly.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150
151 #include <machine/endian.h>
152
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155
156 #include <net/bpf.h>
157
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sta.h"
189 #include "if_iwm_pcie_trans.h"
190 #include "if_iwm_led.h"
191 #include "if_iwm_fw.h"
192
/*
 * Channel numbers advertised for pre-8000-family devices
 * (see iwm_nvm_channels_8000 for the newer chips).
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The driver-side channel array must be able to hold this whole table. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
203
/*
 * Channel numbers advertised for 8000-family devices; a superset of
 * iwm_nvm_channels with additional 5 GHz channels.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The driver-side channel array must be able to hold this whole table. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
214
215 #define IWM_NUM_2GHZ_CHANNELS   14
216 #define IWM_N_HW_ADDR_MASK      0xF
217
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * Entries are ordered CCK (802.11b) first, then OFDM (802.11a/g);
 * the IWM_RIDX_* macros below index into this table.
 */
const struct iwm_rate {
	uint8_t rate;	/* bit rate in units of 500 kbit/s (2 => 1 Mb/s) */
	uint8_t plcp;	/* corresponding hardware PLCP rate code */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* first CCK entry */
#define IWM_RIDX_OFDM	4	/* first OFDM entry */
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
244
/* One raw NVM section image read from the device, held for parsing. */
struct iwm_nvm_section {
	uint16_t length;	/* size of 'data' in bytes */
	uint8_t *data;		/* raw section contents */
};
249
/* How long to wait for the firmware "alive" / calibration notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State filled in while waiting for the firmware alive notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive response arrived */
	uint32_t scd_base_addr;	/* scheduler base address reported by ucode
				 * (presumably SRAM; confirm against firmware docs) */
};
257
258 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
259 static int      iwm_firmware_store_section(struct iwm_softc *,
260                                            enum iwm_ucode_type,
261                                            const uint8_t *, size_t);
262 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
263 static void     iwm_fw_info_free(struct iwm_fw_info *);
264 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
265 #if !defined(__DragonFly__)
266 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
267 #endif
268 static int      iwm_alloc_fwmem(struct iwm_softc *);
269 static int      iwm_alloc_sched(struct iwm_softc *);
270 static int      iwm_alloc_kw(struct iwm_softc *);
271 static int      iwm_alloc_ict(struct iwm_softc *);
272 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
276                                   int);
277 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void     iwm_enable_interrupts(struct iwm_softc *);
280 static void     iwm_restore_interrupts(struct iwm_softc *);
281 static void     iwm_disable_interrupts(struct iwm_softc *);
282 static void     iwm_ict_reset(struct iwm_softc *);
283 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
284 static void     iwm_stop_device(struct iwm_softc *);
285 static void     iwm_mvm_nic_config(struct iwm_softc *);
286 static int      iwm_nic_rx_init(struct iwm_softc *);
287 static int      iwm_nic_tx_init(struct iwm_softc *);
288 static int      iwm_nic_init(struct iwm_softc *);
289 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
290 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
291                                    uint16_t, uint8_t *, uint16_t *);
292 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
293                                      uint16_t *, uint32_t);
294 static uint32_t iwm_eeprom_channel_flags(uint16_t);
295 static void     iwm_add_channel_band(struct iwm_softc *,
296                     struct ieee80211_channel[], int, int *, int, size_t,
297                     const uint8_t[]);
298 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
299                     struct ieee80211_channel[]);
300 static struct iwm_nvm_data *
301         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302                            const uint16_t *, const uint16_t *,
303                            const uint16_t *, const uint16_t *,
304                            const uint16_t *);
305 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
306 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
307                                                struct iwm_nvm_data *,
308                                                const uint16_t *,
309                                                const uint16_t *);
310 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
311                             const uint16_t *);
312 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
313 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
314                                   const uint16_t *);
315 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
316                                    const uint16_t *);
317 static void     iwm_set_radio_cfg(const struct iwm_softc *,
318                                   struct iwm_nvm_data *, uint32_t);
319 static struct iwm_nvm_data *
320         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
321 static int      iwm_nvm_init(struct iwm_softc *);
322 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
323                                       const struct iwm_fw_desc *);
324 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
325                                              bus_addr_t, uint32_t);
326 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
327                                                 const struct iwm_fw_sects *,
328                                                 int, int *);
329 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
330                                            const struct iwm_fw_sects *,
331                                            int, int *);
332 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
333                                                const struct iwm_fw_sects *);
334 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
335                                           const struct iwm_fw_sects *);
336 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
337 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
338 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
339 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
340                                               enum iwm_ucode_type);
341 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
342 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
343 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
344                                             struct iwm_rx_phy_info *);
345 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
346                                       struct iwm_rx_packet *);
347 static int      iwm_get_noise(struct iwm_softc *sc,
348                     const struct iwm_mvm_statistics_rx_non_phy *);
349 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
350                                     uint32_t, boolean_t);
351 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
352                                          struct iwm_rx_packet *,
353                                          struct iwm_node *);
354 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
355 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
356 #if 0
357 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
358                                  uint16_t);
359 #endif
360 static const struct iwm_rate *
361         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
362                         struct ieee80211_frame *, struct iwm_tx_cmd *);
363 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
364                        struct ieee80211_node *, int);
365 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
366                              const struct ieee80211_bpf_params *);
367 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
368 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
369 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
370 static struct ieee80211_node *
371                 iwm_node_alloc(struct ieee80211vap *,
372                                const uint8_t[IEEE80211_ADDR_LEN]);
373 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
374 static int      iwm_media_change(struct ifnet *);
375 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
376 static void     iwm_endscan_cb(void *, int);
377 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
378                                         struct iwm_sf_cfg_cmd *,
379                                         struct ieee80211_node *);
380 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
381 static int      iwm_send_bt_init_conf(struct iwm_softc *);
382 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
383 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
384 static int      iwm_init_hw(struct iwm_softc *);
385 static void     iwm_init(struct iwm_softc *);
386 static void     iwm_start(struct iwm_softc *);
387 static void     iwm_stop(struct iwm_softc *);
388 static void     iwm_watchdog(void *);
389 static void     iwm_parent(struct ieee80211com *);
390 #ifdef IWM_DEBUG
391 static const char *
392                 iwm_desc_lookup(uint32_t);
393 static void     iwm_nic_error(struct iwm_softc *);
394 static void     iwm_nic_umac_error(struct iwm_softc *);
395 #endif
396 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
397 static void     iwm_notif_intr(struct iwm_softc *);
398 static void     iwm_intr(void *);
399 static int      iwm_attach(device_t);
400 static int      iwm_is_valid_ether_addr(uint8_t *);
401 static void     iwm_preinit(void *);
402 static int      iwm_detach_local(struct iwm_softc *sc, int);
403 static void     iwm_init_task(void *);
404 static void     iwm_radiotap_attach(struct iwm_softc *);
405 static struct ieee80211vap *
406                 iwm_vap_create(struct ieee80211com *,
407                                const char [IFNAMSIZ], int,
408                                enum ieee80211_opmode, int,
409                                const uint8_t [IEEE80211_ADDR_LEN],
410                                const uint8_t [IEEE80211_ADDR_LEN]);
411 static void     iwm_vap_delete(struct ieee80211vap *);
412 static void     iwm_scan_start(struct ieee80211com *);
413 static void     iwm_scan_end(struct ieee80211com *);
414 static void     iwm_update_mcast(struct ieee80211com *);
415 static void     iwm_set_channel(struct ieee80211com *);
416 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
417 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
418 static int      iwm_detach(device_t);
419
420 #if defined(__DragonFly__)
421 static int      iwm_msi_enable = 1;
422
423 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
424
425 #endif
426
427 /*
428  * Firmware parser.
429  */
430
431 static int
432 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
433 {
434         const struct iwm_fw_cscheme_list *l = (const void *)data;
435
436         if (dlen < sizeof(*l) ||
437             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
438                 return EINVAL;
439
440         /* we don't actually store anything for now, always use s/w crypto */
441
442         return 0;
443 }
444
445 static int
446 iwm_firmware_store_section(struct iwm_softc *sc,
447     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
448 {
449         struct iwm_fw_sects *fws;
450         struct iwm_fw_desc *fwone;
451
452         if (type >= IWM_UCODE_TYPE_MAX)
453                 return EINVAL;
454         if (dlen < sizeof(uint32_t))
455                 return EINVAL;
456
457         fws = &sc->sc_fw.fw_sects[type];
458         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
459                 return EINVAL;
460
461         fwone = &fws->fw_sect[fws->fw_count];
462
463         /* first 32bit are device load offset */
464         memcpy(&fwone->offset, data, sizeof(uint32_t));
465
466         /* rest is data */
467         fwone->data = data + sizeof(uint32_t);
468         fwone->len = dlen - sizeof(uint32_t);
469
470         fws->fw_count++;
471
472         return 0;
473 }
474
/* Scan channel count assumed unless the firmware TLVs override it. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/*
 * Wire layout of a default-calibration firmware TLV payload
 * (consumed by iwm_set_default_calib()).
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
481
482 static int
483 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
484 {
485         const struct iwm_tlv_calib_data *def_calib = data;
486         uint32_t ucode_type = le32toh(def_calib->ucode_type);
487
488         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
489                 device_printf(sc->sc_dev,
490                     "Wrong ucode_type %u for default "
491                     "calibration.\n", ucode_type);
492                 return EINVAL;
493         }
494
495         sc->sc_default_calib[ucode_type].flow_trigger =
496             def_calib->calib.flow_trigger;
497         sc->sc_default_calib[ucode_type].event_trigger =
498             def_calib->calib.event_trigger;
499
500         return 0;
501 }
502
503 static int
504 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
505                         struct iwm_ucode_capabilities *capa)
506 {
507         const struct iwm_ucode_api *ucode_api = (const void *)data;
508         uint32_t api_index = le32toh(ucode_api->api_index);
509         uint32_t api_flags = le32toh(ucode_api->api_flags);
510         int i;
511
512         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
513                 device_printf(sc->sc_dev,
514                     "api flags index %d larger than supported by driver\n",
515                     api_index);
516                 /* don't return an error so we can load FW that has more bits */
517                 return 0;
518         }
519
520         for (i = 0; i < 32; i++) {
521                 if (api_flags & (1U << i))
522                         setbit(capa->enabled_api, i + 32 * api_index);
523         }
524
525         return 0;
526 }
527
528 static int
529 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
530                            struct iwm_ucode_capabilities *capa)
531 {
532         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
533         uint32_t api_index = le32toh(ucode_capa->api_index);
534         uint32_t api_flags = le32toh(ucode_capa->api_capa);
535         int i;
536
537         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
538                 device_printf(sc->sc_dev,
539                     "capa flags index %d larger than supported by driver\n",
540                     api_index);
541                 /* don't return an error so we can load FW that has more bits */
542                 return 0;
543         }
544
545         for (i = 0; i < 32; i++) {
546                 if (api_flags & (1U << i))
547                         setbit(capa->enabled_capa, i + 32 * api_index);
548         }
549
550         return 0;
551 }
552
/*
 * Release the loaded firmware image and clear the parsed section table.
 * fw->fw_status is deliberately left untouched so threads sleeping on
 * it in iwm_read_firmware() observe a consistent state.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
561
562 static int
563 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
564 {
565         struct iwm_fw_info *fw = &sc->sc_fw;
566         const struct iwm_tlv_ucode_header *uhdr;
567         const struct iwm_ucode_tlv *tlv;
568         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
569         enum iwm_ucode_tlv_type tlv_type;
570         const struct firmware *fwp;
571         const uint8_t *data;
572         uint32_t tlv_len;
573         uint32_t usniffer_img;
574         const uint8_t *tlv_data;
575         uint32_t paging_mem_size;
576         int num_of_cpus;
577         int error = 0;
578         size_t len;
579
580         if (fw->fw_status == IWM_FW_STATUS_DONE &&
581             ucode_type != IWM_UCODE_INIT)
582                 return 0;
583
584         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
585 #if defined(__DragonFly__)
586                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
587 #else
588                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
589 #endif
590         }
591         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
592
593         if (fw->fw_fp != NULL)
594                 iwm_fw_info_free(fw);
595
596         /*
597          * Load firmware into driver memory.
598          * fw_fp will be set.
599          */
600         IWM_UNLOCK(sc);
601         fwp = firmware_get(sc->cfg->fw_name);
602         IWM_LOCK(sc);
603         if (fwp == NULL) {
604                 device_printf(sc->sc_dev,
605                     "could not read firmware %s (error %d)\n",
606                     sc->cfg->fw_name, error);
607                 goto out;
608         }
609         fw->fw_fp = fwp;
610
611         /* (Re-)Initialize default values. */
612         capa->flags = 0;
613         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
614         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
615         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
616         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
617         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
618
619         /*
620          * Parse firmware contents
621          */
622
623         uhdr = (const void *)fw->fw_fp->data;
624         if (*(const uint32_t *)fw->fw_fp->data != 0
625             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
626                 device_printf(sc->sc_dev, "invalid firmware %s\n",
627                     sc->cfg->fw_name);
628                 error = EINVAL;
629                 goto out;
630         }
631
632         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
633             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
634             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
635             IWM_UCODE_API(le32toh(uhdr->ver)));
636         data = uhdr->data;
637         len = fw->fw_fp->datasize - sizeof(*uhdr);
638
639         while (len >= sizeof(*tlv)) {
640                 len -= sizeof(*tlv);
641                 tlv = (const void *)data;
642
643                 tlv_len = le32toh(tlv->length);
644                 tlv_type = le32toh(tlv->type);
645                 tlv_data = tlv->data;
646
647                 if (len < tlv_len) {
648                         device_printf(sc->sc_dev,
649                             "firmware too short: %zu bytes\n",
650                             len);
651                         error = EINVAL;
652                         goto parse_out;
653                 }
654                 len -= roundup2(tlv_len, 4);
655                 data += sizeof(tlv) + roundup2(tlv_len, 4);
656
657                 switch ((int)tlv_type) {
658                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
659                         if (tlv_len != sizeof(uint32_t)) {
660                                 device_printf(sc->sc_dev,
661                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
662                                     __func__,
663                                     (int) tlv_len);
664                                 error = EINVAL;
665                                 goto parse_out;
666                         }
667                         capa->max_probe_length =
668                             le32_to_cpup((const uint32_t *)tlv_data);
669                         /* limit it to something sensible */
670                         if (capa->max_probe_length >
671                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
672                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
673                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
674                                     "ridiculous\n", __func__);
675                                 error = EINVAL;
676                                 goto parse_out;
677                         }
678                         break;
679                 case IWM_UCODE_TLV_PAN:
680                         if (tlv_len) {
681                                 device_printf(sc->sc_dev,
682                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
683                                     __func__,
684                                     (int) tlv_len);
685                                 error = EINVAL;
686                                 goto parse_out;
687                         }
688                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
689                         break;
690                 case IWM_UCODE_TLV_FLAGS:
691                         if (tlv_len < sizeof(uint32_t)) {
692                                 device_printf(sc->sc_dev,
693                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
694                                     __func__,
695                                     (int) tlv_len);
696                                 error = EINVAL;
697                                 goto parse_out;
698                         }
699                         if (tlv_len % sizeof(uint32_t)) {
700                                 device_printf(sc->sc_dev,
701                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
702                                     __func__,
703                                     (int) tlv_len);
704                                 error = EINVAL;
705                                 goto parse_out;
706                         }
707                         /*
708                          * Apparently there can be many flags, but Linux driver
709                          * parses only the first one, and so do we.
710                          *
711                          * XXX: why does this override IWM_UCODE_TLV_PAN?
712                          * Intentional or a bug?  Observations from
713                          * current firmware file:
714                          *  1) TLV_PAN is parsed first
715                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
716                          * ==> this resets TLV_PAN to itself... hnnnk
717                          */
718                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
719                         break;
720                 case IWM_UCODE_TLV_CSCHEME:
721                         if ((error = iwm_store_cscheme(sc,
722                             tlv_data, tlv_len)) != 0) {
723                                 device_printf(sc->sc_dev,
724                                     "%s: iwm_store_cscheme(): returned %d\n",
725                                     __func__,
726                                     error);
727                                 goto parse_out;
728                         }
729                         break;
730                 case IWM_UCODE_TLV_NUM_OF_CPU:
731                         if (tlv_len != sizeof(uint32_t)) {
732                                 device_printf(sc->sc_dev,
733                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
734                                     __func__,
735                                     (int) tlv_len);
736                                 error = EINVAL;
737                                 goto parse_out;
738                         }
739                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
740                         if (num_of_cpus == 2) {
741                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
742                                         TRUE;
743                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
744                                         TRUE;
745                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
746                                         TRUE;
747                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
748                                 device_printf(sc->sc_dev,
749                                     "%s: Driver supports only 1 or 2 CPUs\n",
750                                     __func__);
751                                 error = EINVAL;
752                                 goto parse_out;
753                         }
754                         break;
755                 case IWM_UCODE_TLV_SEC_RT:
756                         if ((error = iwm_firmware_store_section(sc,
757                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
758                                 device_printf(sc->sc_dev,
759                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
760                                     __func__,
761                                     error);
762                                 goto parse_out;
763                         }
764                         break;
765                 case IWM_UCODE_TLV_SEC_INIT:
766                         if ((error = iwm_firmware_store_section(sc,
767                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
768                                 device_printf(sc->sc_dev,
769                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
770                                     __func__,
771                                     error);
772                                 goto parse_out;
773                         }
774                         break;
775                 case IWM_UCODE_TLV_SEC_WOWLAN:
776                         if ((error = iwm_firmware_store_section(sc,
777                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
778                                 device_printf(sc->sc_dev,
779                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
780                                     __func__,
781                                     error);
782                                 goto parse_out;
783                         }
784                         break;
785                 case IWM_UCODE_TLV_DEF_CALIB:
786                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
787                                 device_printf(sc->sc_dev,
788                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
789                                     __func__,
790                                     (int) tlv_len,
791                                     (int) sizeof(struct iwm_tlv_calib_data));
792                                 error = EINVAL;
793                                 goto parse_out;
794                         }
795                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
796                                 device_printf(sc->sc_dev,
797                                     "%s: iwm_set_default_calib() failed: %d\n",
798                                     __func__,
799                                     error);
800                                 goto parse_out;
801                         }
802                         break;
803                 case IWM_UCODE_TLV_PHY_SKU:
804                         if (tlv_len != sizeof(uint32_t)) {
805                                 error = EINVAL;
806                                 device_printf(sc->sc_dev,
807                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
808                                     __func__,
809                                     (int) tlv_len);
810                                 goto parse_out;
811                         }
812                         sc->sc_fw.phy_config =
813                             le32_to_cpup((const uint32_t *)tlv_data);
814                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
815                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
816                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
817                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
818                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
819                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
820                         break;
821
822                 case IWM_UCODE_TLV_API_CHANGES_SET: {
823                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
824                                 error = EINVAL;
825                                 goto parse_out;
826                         }
827                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
828                                 error = EINVAL;
829                                 goto parse_out;
830                         }
831                         break;
832                 }
833
834                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
835                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
836                                 error = EINVAL;
837                                 goto parse_out;
838                         }
839                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
840                                 error = EINVAL;
841                                 goto parse_out;
842                         }
843                         break;
844                 }
845
846                 case 48: /* undocumented TLV */
847                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
848                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
849                         /* ignore, not used by current driver */
850                         break;
851
852                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
853                         if ((error = iwm_firmware_store_section(sc,
854                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
855                             tlv_len)) != 0)
856                                 goto parse_out;
857                         break;
858
859                 case IWM_UCODE_TLV_PAGING:
860                         if (tlv_len != sizeof(uint32_t)) {
861                                 error = EINVAL;
862                                 goto parse_out;
863                         }
864                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
865
866                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
867                             "%s: Paging: paging enabled (size = %u bytes)\n",
868                             __func__, paging_mem_size);
869                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
870                                 device_printf(sc->sc_dev,
871                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
872                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
873                                 error = EINVAL;
874                                 goto out;
875                         }
876                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
877                                 device_printf(sc->sc_dev,
878                                     "%s: Paging: image isn't multiple %u\n",
879                                     __func__, IWM_FW_PAGING_SIZE);
880                                 error = EINVAL;
881                                 goto out;
882                         }
883
884                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
885                             paging_mem_size;
886                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
887                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
888                             paging_mem_size;
889                         break;
890
891                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
892                         if (tlv_len != sizeof(uint32_t)) {
893                                 error = EINVAL;
894                                 goto parse_out;
895                         }
896                         capa->n_scan_channels =
897                             le32_to_cpup((const uint32_t *)tlv_data);
898                         break;
899
900                 case IWM_UCODE_TLV_FW_VERSION:
901                         if (tlv_len != sizeof(uint32_t) * 3) {
902                                 error = EINVAL;
903                                 goto parse_out;
904                         }
905                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
906                             "%d.%d.%d",
907                             le32toh(((const uint32_t *)tlv_data)[0]),
908                             le32toh(((const uint32_t *)tlv_data)[1]),
909                             le32toh(((const uint32_t *)tlv_data)[2]));
910                         break;
911
912                 case IWM_UCODE_TLV_FW_MEM_SEG:
913                         break;
914
915                 default:
916                         device_printf(sc->sc_dev,
917                             "%s: unknown firmware section %d, abort\n",
918                             __func__, tlv_type);
919                         error = EINVAL;
920                         goto parse_out;
921                 }
922         }
923
924         KASSERT(error == 0, ("unhandled error"));
925
926  parse_out:
927         if (error) {
928                 device_printf(sc->sc_dev, "firmware parse error %d, "
929                     "section type %d\n", error, tlv_type);
930         }
931
932  out:
933         if (error) {
934                 fw->fw_status = IWM_FW_STATUS_NONE;
935                 if (fw->fw_fp != NULL)
936                         iwm_fw_info_free(fw);
937         } else
938                 fw->fw_status = IWM_FW_STATUS_DONE;
939         wakeup(&sc->sc_fw);
940
941         return error;
942 }
943
944 /*
945  * DMA resource routines
946  */
947
948 /* fwmem is used to load firmware onto the card */
949 static int
950 iwm_alloc_fwmem(struct iwm_softc *sc)
951 {
952         /* Must be aligned on a 16-byte boundary. */
953         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
954             IWM_FH_MEM_TB_MAX_LENGTH, 16);
955 }
956
957 /* tx scheduler rings.  not used? */
958 static int
959 iwm_alloc_sched(struct iwm_softc *sc)
960 {
961         /* TX scheduler rings must be aligned on a 1KB boundary. */
962         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
963             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
964 }
965
966 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
967 static int
968 iwm_alloc_kw(struct iwm_softc *sc)
969 {
970         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
971 }
972
973 /* interrupt cause table */
974 static int
975 iwm_alloc_ict(struct iwm_softc *sc)
976 {
977         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
978             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
979 }
980
/*
 * Allocate all DMA resources for one RX ring: the descriptor array,
 * the shared status area, the buffer DMA tag/maps, and the initial
 * set of RX mbufs.  On any failure the partially-built ring is torn
 * down via iwm_free_rx_ring() and the bus_dma error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	/* DragonFly's bus_dma_tag_create() has a different signature. */
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach an mbuf to slot i; this loads data->map as well. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1061
1062 static void
1063 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1064 {
1065         /* Reset the ring state */
1066         ring->cur = 0;
1067
1068         /*
1069          * The hw rx ring index in shared memory must also be cleared,
1070          * otherwise the discrepancy can cause reprocessing chaos.
1071          */
1072         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1073 }
1074
1075 static void
1076 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1077 {
1078         int i;
1079
1080         iwm_dma_contig_free(&ring->desc_dma);
1081         iwm_dma_contig_free(&ring->stat_dma);
1082
1083         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1084                 struct iwm_rx_data *data = &ring->data[i];
1085
1086                 if (data->m != NULL) {
1087                         bus_dmamap_sync(ring->data_dmat, data->map,
1088                             BUS_DMASYNC_POSTREAD);
1089                         bus_dmamap_unload(ring->data_dmat, data->map);
1090                         m_freem(data->m);
1091                         data->m = NULL;
1092                 }
1093                 if (data->map != NULL) {
1094                         bus_dmamap_destroy(ring->data_dmat, data->map);
1095                         data->map = NULL;
1096                 }
1097         }
1098         if (ring->spare_map != NULL) {
1099                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1100                 ring->spare_map = NULL;
1101         }
1102         if (ring->data_dmat != NULL) {
1103                 bus_dma_tag_destroy(ring->data_dmat);
1104                 ring->data_dmat = NULL;
1105         }
1106 }
1107
/*
 * Allocate all DMA resources for one TX ring (queue 'qid'): the TFD
 * descriptor array, and -- for the rings actually used (qid <=
 * IWM_MVM_CMD_QUEUE) -- the device command array, buffer DMA tag and
 * per-slot maps.  On failure the partially-built ring is torn down
 * via iwm_free_tx_ring() and the bus_dma error is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	/* DragonFly's bus_dma_tag_create() has a different signature. */
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Pre-compute the physical address of each slot's device command
	 * and of the scratch field inside the TX command, so the TX path
	 * doesn't have to do this arithmetic per packet.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: we walked exactly the command array we allocated. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1197
/*
 * Drop all pending frames from a TX ring and reset its software state
 * without freeing any DMA resources.  Also clears the ring's bit in
 * the queue-full mask so the TX path can use it again.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	/* Make the cleared descriptors visible to the device. */
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/*
	 * If the command queue held the NIC awake for an in-flight
	 * command, release that request now that the queue is empty.
	 */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1225
1226 static void
1227 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1228 {
1229         int i;
1230
1231         iwm_dma_contig_free(&ring->desc_dma);
1232         iwm_dma_contig_free(&ring->cmd_dma);
1233
1234         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1235                 struct iwm_tx_data *data = &ring->data[i];
1236
1237                 if (data->m != NULL) {
1238                         bus_dmamap_sync(ring->data_dmat, data->map,
1239                             BUS_DMASYNC_POSTWRITE);
1240                         bus_dmamap_unload(ring->data_dmat, data->map);
1241                         m_freem(data->m);
1242                         data->m = NULL;
1243                 }
1244                 if (data->map != NULL) {
1245                         bus_dmamap_destroy(ring->data_dmat, data->map);
1246                         data->map = NULL;
1247                 }
1248         }
1249         if (ring->data_dmat != NULL) {
1250                 bus_dma_tag_destroy(ring->data_dmat);
1251                 ring->data_dmat = NULL;
1252         }
1253 }
1254
1255 /*
1256  * High-level hardware frobbing routines
1257  */
1258
/*
 * Enable the default interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-program the same set later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1265
/* Re-program the last interrupt mask cached by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1271
/* Mask all interrupt sources and ACK anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1282
/*
 * Clear the interrupt cause table, point the device at it, and switch
 * the driver into ICT interrupt mode.  Interrupts are disabled for
 * the duration and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1306
1307 /*
1308  * Since this .. hard-resets things, it's time to actually
1309  * mark the first vap (if any) as having no mac context.
1310  * It's annoying, but since the driver is potentially being
1311  * stop/start'ed whilst active (thanks openbsd port!) we
1312  * have to correctly track this.
1313  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		/* Disable the TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1406
/*
 * Program the HW_IF_CONFIG register from the firmware's PHY
 * configuration: radio type/step/dash plus the MAC step/dash taken
 * from the hardware revision.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Extract the three radio configuration fields from phy_config. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1449
/*
 * Initialize the RX DMA engine: reset the ring pointers, hand the
 * descriptor ring and status area physical addresses to the device,
 * and program the channel-0 RX configuration register.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *	the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1518
/*
 * Initialize the TX side of the device: deactivate the scheduler,
 * program the "keep warm" page address, load each TX ring's
 * descriptor base address, then switch the scheduler to
 * auto-active mode.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1553
/*
 * Full NIC initialization sequence: APM init, power configuration
 * (7000 family only), MAC/radio configuration, then RX and TX DMA
 * setup, and finally shadow register enabling (per the debug
 * message below; mask value taken from iwlwifi).
 *
 * Returns 0 on success, or the errno from the RX/TX init step that
 * failed.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1580
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is configured directly via
 * scheduler PRPH registers and SRAM context writes; all other queues
 * are configured by sending an IWM_SCD_QUEUE_CFG host command to the
 * firmware (sta_id is only used in that path).
 *
 * The NIC lock is repeatedly dropped and re-taken because
 * iwm_clear_bits_prph()/iwm_write_mem32()/iwm_mvm_send_cmd_pdu()
 * manage the lock themselves.
 *
 * Returns 0 on success, EBUSY if the NIC lock cannot be (re)taken,
 * or the error from the host command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Take the queue out of aggregation mode. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and attach it to the FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		/* Firmware configures non-command queues for us. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): Linux iwlwifi sets BIT(queue) in SCD_EN_CTRL;
	 * this ORs in the raw qid value instead — verify against
	 * upstream before changing, as firmware behavior may differ.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1668
/*
 * Post-"alive" transport setup, run once firmware reports it is up:
 * reset the ICT interrupt table, cross-check the scheduler SRAM base
 * address from the alive notification (scd_base_addr) against the
 * PRPH value, wipe the scheduler context/status/translation memory,
 * point the scheduler at our DRAM byte-count tables, enable the
 * command queue, turn on all TX DMA channels, and re-enable
 * L1-Active (non-8000 family only).
 *
 * A scd_base_addr of 0 skips the cross-check.  Returns 0 on success,
 * EBUSY on lock/memory-write failure, or the error from
 * iwm_enable_txq().
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		/* Mismatch is logged but not fatal; the PRPH value wins. */
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1739
1740 /*
1741  * NVM read access and content parsing.  We do not support
1742  * external NVM or writing NVM.
1743  * iwlwifi/mvm/nvm.c
1744  */
1745
/* Default NVM size to read per NVM_ACCESS chunk (2 KiB). */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* op_code values for struct iwm_nvm_access_cmd. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of iwm_nvm_access_resp) */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1757
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD
 * firmware command.
 *
 * 'section'/'offset'/'length' select the chunk; on success the data
 * is copied into data + offset and *len is set to the number of
 * bytes actually read (which may be less than 'length', signalling
 * end of section to the caller).
 *
 * Returns 0 on success (including the benign NOT_VALID_ADDRESS case
 * at offset != 0, where *len is set to 0), EIO/EINVAL on bad
 * firmware responses, or the error from iwm_send_cmd().
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		/* WANT_SKB: keep the response buffer; must run even in rfkill */
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	/* Always release the response buffer held by WANT_SKB. */
	iwm_free_resp(sc, &cmd);
	return ret;
}
1844
1845 /*
1846  * Reads an NVM section completely.
1847  * NICs prior to 7000 family don't have a real NVM, but just read
1848  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1849  * by uCode, we need to manually check in this case that we don't
1850  * overflow and try to read more than the EEPROM size.
1851  * For 7000 family NICs, we supply the maximal size we can read, and
1852  * the uCode fills the response with as much data as we can,
1853  * without overflowing, so no check is needed.
1854  */
1855 static int
1856 iwm_nvm_read_section(struct iwm_softc *sc,
1857         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1858 {
1859         uint16_t seglen, length, offset = 0;
1860         int ret;
1861
1862         /* Set nvm section read length */
1863         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1864
1865         seglen = length;
1866
1867         /* Read the NVM until exhausted (reading less than requested) */
1868         while (seglen == length) {
1869                 /* Check no memory assumptions fail and cause an overflow */
1870                 if ((size_read + offset + length) >
1871                     sc->cfg->eeprom_size) {
1872                         device_printf(sc->sc_dev,
1873                             "EEPROM size is too small for NVM\n");
1874                         return ENOBUFS;
1875                 }
1876
1877                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1878                 if (ret) {
1879                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1880                                     "Cannot read NVM from section %d offset %d, length %d\n",
1881                                     section, offset, length);
1882                         return ret;
1883                 }
1884                 offset += seglen;
1885         }
1886
1887         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1888                     "NVM section %d read completed\n", section);
1889         *len = offset;
1890         return 0;
1891 }
1892
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions;
 * these are relative to the start of the SW section. */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1910
/* NVM offsets (in words) for the 8000 family, which splits the data
 * across more sections (HW, SW, regulatory, MAC-override, phy-SKU). */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1936
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* 8000-family radio config uses wider fields and a "flavor" nibble. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback TX power limit in dBm when the NVM gives none. */
#define DEFAULT_MAX_TX_POWER 16
1961
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Note: bit 2 is unassigned here; only VALID/IBSS/ACTIVE/RADAR are
 * consumed by iwm_eeprom_channel_flags() in this driver.
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1986
1987 /*
1988  * Translate EEPROM flags to net80211.
1989  */
1990 static uint32_t
1991 iwm_eeprom_channel_flags(uint16_t ch_flags)
1992 {
1993         uint32_t nflags;
1994
1995         nflags = 0;
1996         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1997                 nflags |= IEEE80211_CHAN_PASSIVE;
1998         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1999                 nflags |= IEEE80211_CHAN_NOADHOC;
2000         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2001                 nflags |= IEEE80211_CHAN_DFS;
2002                 /* Just in case. */
2003                 nflags |= IEEE80211_CHAN_NOADHOC;
2004         }
2005
2006         return (nflags);
2007 }
2008
/*
 * Add the channels in NVM table slots [ch_idx, ch_num) to chans[],
 * tagging each with the ieee80211 modes listed in 'bands'.
 *
 * Channels lacking the VALID flag are skipped; the rest have their
 * NVM flags translated by iwm_eeprom_channel_flags() and are passed
 * to ieee80211_add_channel(), which updates *nchans.  The loop stops
 * early if ieee80211_add_channel() fails (e.g. chans[] is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The IEEE channel number table differs per family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2049
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * parsed NVM channel table.  2 GHz channels 1-13 support 11b/g,
 * channel 14 is 11b-only, and 5 GHz channels are added only when
 * the SKU enables the 52 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* Table length differs between the 7000 and 8000 families. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2082
/*
 * Derive the MAC address on 8000-family devices.
 *
 * Preference order:
 *   1. The MAC-address-override NVM section (no byte swapping
 *      needed), unless it holds the reserved/broadcast/multicast
 *      or otherwise invalid address.
 *   2. The WFMP_MAC_ADDR_0/1 PRPH registers, whose bytes are
 *      stored in reversed order and are swizzled below.
 * If neither source is available, data->hw_addr is zeroed and an
 * error is printed; the caller detects this via
 * iwm_is_valid_ether_addr().
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVMs ship with; never use it. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Register bytes are reversed relative to wire order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2141
2142 static int
2143 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2144             const uint16_t *phy_sku)
2145 {
2146         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2147                 return le16_to_cpup(nvm_sw + IWM_SKU);
2148
2149         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2150 }
2151
2152 static int
2153 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2154 {
2155         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2156                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2157         else
2158                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2159                                                 IWM_NVM_VERSION_8000));
2160 }
2161
2162 static int
2163 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2164                   const uint16_t *phy_sku)
2165 {
2166         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2167                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2168
2169         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2170 }
2171
2172 static int
2173 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2174 {
2175         int n_hw_addr;
2176
2177         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2178                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2179
2180         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2181
2182         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2183 }
2184
2185 static void
2186 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2187                   uint32_t radio_cfg)
2188 {
2189         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2190                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2191                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2192                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2193                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2194                 return;
2195         }
2196
2197         /* set the radio configuration for family 8000 */
2198         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2199         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2200         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2201         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2202         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2203         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2204 }
2205
/*
 * Fill data->hw_addr from the NVM.  Pre-8000 devices store the
 * address little-endian-16-swapped in the HW section; 8000-family
 * devices use iwm_set_hw_address_family_8000() (MAC override
 * section or WFMP registers).
 *
 * Returns 0 on success or EINVAL if no valid MAC address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2236
/*
 * Parse the raw NVM sections into a freshly allocated
 * struct iwm_nvm_data (with the per-channel flag table appended as a
 * flexible tail).  Extracts version, radio config, SKU capability
 * bits, number of HW addresses, the MAC address, and the channel
 * flags (from the SW section on 7000 family, from the regulatory
 * section on 8000 family).
 *
 * Returns the allocated structure, or NULL if no valid MAC address
 * could be determined.  Caller frees with iwm_free_nvm_data().
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;

	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	}
	/* NOTE(review): M_WAITOK kmalloc should not return NULL;
	 * check kept for safety. */
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		kfree(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2286
2287 static void
2288 iwm_free_nvm_data(struct iwm_nvm_data *data)
2289 {
2290         if (data != NULL)
2291                 kfree(data, M_DEVBUF);
2292 }
2293
2294 static struct iwm_nvm_data *
2295 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2296 {
2297         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2298
2299         /* Checking for required sections */
2300         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2301                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2302                     !sections[sc->cfg->nvm_hw_section_num].data) {
2303                         device_printf(sc->sc_dev,
2304                             "Can't parse empty OTP/NVM sections\n");
2305                         return NULL;
2306                 }
2307         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2308                 /* SW and REGULATORY sections are mandatory */
2309                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2310                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2311                         device_printf(sc->sc_dev,
2312                             "Can't parse empty OTP/NVM sections\n");
2313                         return NULL;
2314                 }
2315                 /* MAC_OVERRIDE or at least HW section must exist */
2316                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2317                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2318                         device_printf(sc->sc_dev,
2319                             "Can't parse mac_address, empty sections\n");
2320                         return NULL;
2321                 }
2322
2323                 /* PHY_SKU section is mandatory in B0 */
2324                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2325                         device_printf(sc->sc_dev,
2326                             "Can't parse phy_sku in B0, empty sections\n");
2327                         return NULL;
2328                 }
2329         } else {
2330                 panic("unknown device family %d\n", sc->cfg->device_family);
2331         }
2332
2333         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2334         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2335         calib = (const uint16_t *)
2336             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2337         regulatory = (const uint16_t *)
2338             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2339         mac_override = (const uint16_t *)
2340             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2341         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2342
2343         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2344             phy_sku, regulatory);
2345 }
2346
2347 static int
2348 iwm_nvm_init(struct iwm_softc *sc)
2349 {
2350         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2351         int i, ret, section;
2352         uint32_t size_read = 0;
2353         uint8_t *nvm_buffer, *temp;
2354         uint16_t len;
2355
2356         memset(nvm_sections, 0, sizeof(nvm_sections));
2357
2358         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2359                 return EINVAL;
2360
2361         /* load NVM values from nic */
2362         /* Read From FW NVM */
2363         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2364
2365         nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2366             M_INTWAIT | M_ZERO);
2367         if (!nvm_buffer)
2368                 return ENOMEM;
2369         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2370                 /* we override the constness for initial read */
2371                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2372                                            &len, size_read);
2373                 if (ret)
2374                         continue;
2375                 size_read += len;
2376                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2377                 if (!temp) {
2378                         ret = ENOMEM;
2379                         break;
2380                 }
2381                 memcpy(temp, nvm_buffer, len);
2382
2383                 nvm_sections[section].data = temp;
2384                 nvm_sections[section].length = len;
2385         }
2386         if (!size_read)
2387                 device_printf(sc->sc_dev, "OTP is blank\n");
2388         kfree(nvm_buffer, M_DEVBUF);
2389
2390         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2391         if (!sc->nvm_data)
2392                 return EINVAL;
2393         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2394                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2395
2396         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2397                 if (nvm_sections[i].data != NULL)
2398                         kfree(nvm_sections[i].data, M_DEVBUF);
2399         }
2400
2401         return 0;
2402 }
2403
/*
 * Upload one firmware section into device SRAM, chunk by chunk, going
 * through the single pre-allocated fw_dma bounce buffer.  Chunk size is
 * capped at IWM_FH_MEM_TB_MAX_LENGTH per transfer.
 *
 * Destination addresses inside the extended SRAM window
 * [IWM_FW_MEM_EXTENDED_START, IWM_FW_MEM_EXTENDED_END] need the LMPM
 * extended-address-space bit set for the duration of the transfer.
 *
 * Returns 0 on success, or the error from the first failing chunk.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* Final chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it out. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Clear the chicken bit again regardless of the outcome. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2456
2457 /*
2458  * ucode
2459  */
/*
 * DMA one staged firmware chunk (already copied into the bounce buffer
 * at phy_addr) into device SRAM at dst_addr using the FH service DMA
 * channel, then sleep until the interrupt handler reports completion by
 * setting sc_fw_chunk_done and waking &sc->sc_fw.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if no completion arrived within the 5 second window.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Pause the service channel, program destination SRAM address and
	 * source DRAM address (low bits + high bits with the byte count),
	 * mark the TFD buffer valid, then re-enable the channel.  The
	 * order of these register writes matters.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2517
/*
 * Family-8000: load the consecutive firmware sections belonging to one
 * CPU, stopping at a separator section or an empty slot.  After each
 * loaded section the running section bitmap (sec_num, growing as
 * 1, 3, 7, ...) is OR-ed into IWM_FH_UCODE_LOAD_STATUS; CPU1 reports in
 * the low 16 bits, CPU2 in the high 16 bits (shift_param).
 *
 * *first_ucode_section is updated to the index where loading stopped so
 * the CPU2 pass can resume right after CPU1's sections.
 *
 * Returns 0 on success or the error from a failing section upload (in
 * which case *first_ucode_section is left untouched).
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/*
	 * NOTE(review): the all-ones write presumably tells the ucode this
	 * CPU's load is complete (half-word for CPU1, full word for CPU2)
	 * -- confirm against the iwlwifi reference driver.
	 */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2579
2580 static int
2581 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2582         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2583 {
2584         int shift_param;
2585         int i, ret = 0;
2586         uint32_t last_read_idx = 0;
2587
2588         if (cpu == 1) {
2589                 shift_param = 0;
2590                 *first_ucode_section = 0;
2591         } else {
2592                 shift_param = 16;
2593                 (*first_ucode_section)++;
2594         }
2595
2596         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2597                 last_read_idx = i;
2598
2599                 /*
2600                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2601                  * CPU1 to CPU2.
2602                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2603                  * CPU2 non paged to CPU2 paging sec.
2604                  */
2605                 if (!image->fw_sect[i].data ||
2606                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2607                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2608                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2609                                     "Break since Data not valid or Empty section, sec = %d\n",
2610                                      i);
2611                         break;
2612                 }
2613
2614                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2615                 if (ret)
2616                         return ret;
2617         }
2618
2619         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2620                 iwm_set_bits_prph(sc,
2621                                   IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2622                                   (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2623                                    IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2624                                    IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2625                                         shift_param);
2626
2627         *first_ucode_section = last_read_idx;
2628
2629         return 0;
2630
2631 }
2632
2633 static int
2634 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2635         const struct iwm_fw_sects *image)
2636 {
2637         int ret = 0;
2638         int first_ucode_section;
2639
2640         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2641                      image->is_dual_cpus ? "Dual" : "Single");
2642
2643         /* load to FW the binary non secured sections of CPU1 */
2644         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2645         if (ret)
2646                 return ret;
2647
2648         if (image->is_dual_cpus) {
2649                 /* set CPU2 header address */
2650                 if (iwm_nic_lock(sc)) {
2651                         iwm_write_prph(sc,
2652                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2653                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2654                         iwm_nic_unlock(sc);
2655                 }
2656
2657                 /* load to FW the binary sections of CPU2 */
2658                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2659                                                  &first_ucode_section);
2660                 if (ret)
2661                         return ret;
2662         }
2663
2664         iwm_enable_interrupts(sc);
2665
2666         /* release CPU reset */
2667         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2668
2669         return 0;
2670 }
2671
2672 int
2673 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2674         const struct iwm_fw_sects *image)
2675 {
2676         int ret = 0;
2677         int first_ucode_section;
2678
2679         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2680                     image->is_dual_cpus ? "Dual" : "Single");
2681
2682         /* configure the ucode to be ready to get the secured image */
2683         /* release CPU reset */
2684         if (iwm_nic_lock(sc)) {
2685                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2686                     IWM_RELEASE_CPU_RESET_BIT);
2687                 iwm_nic_unlock(sc);
2688         }
2689
2690         /* load to FW the binary Secured sections of CPU1 */
2691         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2692             &first_ucode_section);
2693         if (ret)
2694                 return ret;
2695
2696         /* load to FW the binary sections of CPU2 */
2697         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2698             &first_ucode_section);
2699 }
2700
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, the one used to observe firmware
 * chunk DMA completion during the firmware load.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2709
/* XXX Add proper rfkill support code */
/*
 * Bring the NIC up and upload the given firmware image.  During the
 * load, interrupts are masked down to FH_TX only, so an RF-kill toggle
 * is only discovered after the load completes.
 *
 * Returns 0 on success, EIO if the hardware never became ready (e.g.
 * AMT owns the device), or an error from iwm_nic_init() / the loader.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts before masking them all. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2767
2768 static int
2769 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2770 {
2771         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2772                 .valid = htole32(valid_tx_ant),
2773         };
2774
2775         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2776             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2777 }
2778
2779 static int
2780 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2781 {
2782         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2783         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2784
2785         /* Set parameters */
2786         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2787         phy_cfg_cmd.calib_control.event_trigger =
2788             sc->sc_default_calib[ucode_type].event_trigger;
2789         phy_cfg_cmd.calib_control.flow_trigger =
2790             sc->sc_default_calib[ucode_type].flow_trigger;
2791
2792         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2793             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2794         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2795             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2796 }
2797
/*
 * Notification-wait callback for the ALIVE response.  Three response
 * layouts exist and are told apart purely by payload size (ver1, ver2,
 * and the current ver3).  Extracts the error/log event table pointers
 * and the scheduler base address, plus (ver2/ver3) the UMAC error table
 * pointer, and records in alive_data->valid whether the ucode reported
 * IWM_ALIVE_STATUS_OK.
 *
 * Always returns TRUE so the first ALIVE packet completes the wait; a
 * payload of unrecognized size leaves alive_data untouched.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* ver1 carries no UMAC log pointer. */
		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address enables UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2874
2875 static int
2876 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2877         struct iwm_rx_packet *pkt, void *data)
2878 {
2879         struct iwm_phy_db *phy_db = data;
2880
2881         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2882                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2883                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2884                             __func__, pkt->hdr.code);
2885                 }
2886                 return TRUE;
2887         }
2888
2889         if (iwm_phy_db_set_section(phy_db, pkt)) {
2890                 device_printf(sc->sc_dev,
2891                     "%s: iwm_phy_db_set_section failed\n", __func__);
2892         }
2893
2894         return FALSE;
2895 }
2896
/*
 * Load the requested ucode image (reading the firmware file first if
 * necessary) and block until the ALIVE notification arrives.
 *
 * On success sc->cur_ucode stays at ucode_type and sc->ucode_loaded is
 * set; on any failure sc->cur_ucode is restored to its previous value.
 * Images that carry a paging image additionally get the FW paging flow
 * configured (save + IWM_FW_PAGING_BLOCK_CMD).
 *
 * Called with the softc lock held; the lock is dropped around the
 * notification wait.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, NELEM(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* On 8000, dump the secure-boot CPU status for diagnosis. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2990
2991 /*
2992  * mvm misc bits
2993  */
2994
/*
 * Run the "init" firmware image.
 *
 * Loads the IWM_UCODE_INIT image and, unless 'justnvm' is set, drives
 * the init-time calibration sequence (BT coex config, Smart FIFO, TX
 * antenna config, PHY config) and then blocks until the firmware posts
 * IWM_INIT_COMPLETE_NOTIF / IWM_CALIB_RES_NOTIF_PHY_DB.
 *
 * With 'justnvm' != 0 only the NVM is read (to obtain the MAC address)
 * and no calibration is performed.
 *
 * Returns 0 on success, EPERM if rfkill is asserted, or the error from
 * the first failing step.  Called with the IWM lock held; it is
 * temporarily dropped around the notification wait.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
        struct iwm_notification_wait calib_wait;
        static const uint16_t init_complete[] = {
                IWM_INIT_COMPLETE_NOTIF,
                IWM_CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        /* do not operate with rfkill switch turned on */
        if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
                device_printf(sc->sc_dev,
                    "radio is disabled by hardware switch\n");
                return EPERM;
        }

        /*
         * Register the waiter before the firmware is started so the
         * calibration notifications cannot race past us.
         */
        iwm_init_notification_wait(sc->sc_notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   NELEM(init_complete),
                                   iwm_wait_phy_db_entry,
                                   sc->sc_phy_db);

        /* Will also start the device */
        ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
        if (ret) {
                device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
                    ret);
                goto error;
        }

        if (justnvm) {
                /* Read nvm */
                ret = iwm_nvm_init(sc);
                if (ret) {
                        device_printf(sc->sc_dev, "failed to read nvm\n");
                        goto error;
                }
                IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
                /*
                 * NVM-only run: we will not wait for calibration, so take
                 * the error path merely to tear down the notification
                 * waiter.  'ret' is 0 here, so this is still a success.
                 */
                goto error;
        }

        ret = iwm_send_bt_init_conf(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send bt coex configuration: %d\n", ret);
                goto error;
        }

        /* Init Smart FIFO. */
        ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
        if (ret)
                goto error;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send antennas before calibration: %d\n", ret);
                goto error;
        }

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwm_send_phy_cfg_cmd(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "%s: Failed to run INIT calibrations: %d\n",
                    __func__, ret);
                goto error;
        }

        /*
         * Nothing to do but wait for the init complete notification
         * from the firmware.  Drop the lock so the RX path can deliver
         * the notification while we sleep.
         */
        IWM_UNLOCK(sc);
        ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
            IWM_MVM_UCODE_CALIB_TIMEOUT);
        IWM_LOCK(sc);


        goto out;

error:
        /* A completed wait removes itself; only remove on the error path. */
        iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
        return ret;
}
3087
3088 /*
3089  * receive side
3090  */
3091
3092 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * (Re)stock one RX ring slot with a fresh jumbo cluster mbuf.
 *
 * The new mbuf is DMA-loaded into the ring's spare map first; only when
 * that succeeds is the slot's old map unloaded and the two maps swapped,
 * so a load failure leaves the existing buffer in place.  The slot's RX
 * descriptor is then pointed at the new buffer.
 *
 * 'size' is unused here (the buffer is always IWM_RBUF_SIZE); 'idx' is
 * the ring slot to replenish.  Returns 0 on success, ENOBUFS if no mbuf
 * is available, or the busdma load error.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
        struct iwm_rx_ring *ring = &sc->rxq;
        struct iwm_rx_data *data = &ring->data[idx];
        struct mbuf *m;
        bus_dmamap_t dmamap;
        bus_dma_segment_t seg;
        int nsegs, error;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
        if (m == NULL)
                return ENOBUFS;

        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
#if defined(__DragonFly__)
        error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
            m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
            &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: can't map mbuf, error %d\n", __func__, error);
                m_freem(m);
                return error;
        }

        if (data->m != NULL)
                bus_dmamap_unload(ring->data_dmat, data->map);

        /* Swap ring->spare_map with data->map */
        dmamap = data->map;
        data->map = ring->spare_map;
        ring->spare_map = dmamap;

        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
        data->m = m;

        /* Update RX descriptor. */
        /* The hardware takes a 256-byte-aligned address, shifted right 8. */
        KKASSERT((seg.ds_addr & 255) == 0);
        ring->desc[idx] = htole32(seg.ds_addr >> 8);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

        return 0;
}
3141
3142 /*
3143  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3144  * values are reported by the fw as positive values - need to negate
3145  * to obtain their dBM.  Account for missing antennas by replacing 0
3146  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3147  */
3148 static int
3149 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3150 {
3151         int energy_a, energy_b, energy_c, max_energy;
3152         uint32_t val;
3153
3154         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3155         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3156             IWM_RX_INFO_ENERGY_ANT_A_POS;
3157         energy_a = energy_a ? -energy_a : -256;
3158         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3159             IWM_RX_INFO_ENERGY_ANT_B_POS;
3160         energy_b = energy_b ? -energy_b : -256;
3161         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3162             IWM_RX_INFO_ENERGY_ANT_C_POS;
3163         energy_c = energy_c ? -energy_c : -256;
3164         max_energy = MAX(energy_a, energy_b);
3165         max_energy = MAX(max_energy, energy_c);
3166
3167         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3168             "energy In A %d B %d C %d , and max %d\n",
3169             energy_a, energy_b, energy_c, max_energy);
3170
3171         return max_energy;
3172 }
3173
3174 static void
3175 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3176 {
3177         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3178
3179         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3180
3181         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3182 }
3183
3184 /*
3185  * Retrieve the average noise (in dBm) among receivers.
3186  */
3187 static int
3188 iwm_get_noise(struct iwm_softc *sc,
3189         const struct iwm_mvm_statistics_rx_non_phy *stats)
3190 {
3191         int i, total, nbant, noise;
3192
3193         total = nbant = noise = 0;
3194         for (i = 0; i < 3; i++) {
3195                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3196                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3197                     __func__, i, noise);
3198
3199                 if (noise) {
3200                         total += noise;
3201                         nbant++;
3202                 }
3203         }
3204
3205         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3206             __func__, nbant, total);
3207 #if 0
3208         /* There should be at least one antenna but check anyway. */
3209         return (nbant == 0) ? -127 : (total / nbant) - 107;
3210 #else
3211         /* For now, just hard-code it to -96 to be safe */
3212         return (-96);
3213 #endif
3214 }
3215
3216 /*
3217  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3218  *
3219  * Handles the actual data of the Rx packet from the fw
3220  */
3221 static boolean_t
3222 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3223         boolean_t stolen)
3224 {
3225         struct ieee80211com *ic = &sc->sc_ic;
3226         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3227         struct ieee80211_frame *wh;
3228         struct ieee80211_node *ni;
3229         struct ieee80211_rx_stats rxs;
3230         struct iwm_rx_phy_info *phy_info;
3231         struct iwm_rx_mpdu_res_start *rx_res;
3232         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3233         uint32_t len;
3234         uint32_t rx_pkt_status;
3235         int rssi;
3236
3237         phy_info = &sc->sc_last_phy_info;
3238         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3239         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3240         len = le16toh(rx_res->byte_count);
3241         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3242
3243         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3244                 device_printf(sc->sc_dev,
3245                     "dsp size out of range [0,20]: %d\n",
3246                     phy_info->cfg_phy_cnt);
3247                 return FALSE;
3248         }
3249
3250         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3251             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3252                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3253                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3254                 return FALSE; /* drop */
3255         }
3256
3257         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3258         /* Note: RSSI is absolute (ie a -ve value) */
3259         if (rssi < IWM_MIN_DBM)
3260                 rssi = IWM_MIN_DBM;
3261         else if (rssi > IWM_MAX_DBM)
3262                 rssi = IWM_MAX_DBM;
3263
3264         /* Map it to relative value */
3265         rssi = rssi - sc->sc_noise;
3266
3267         /* replenish ring for the buffer we're going to feed to the sharks */
3268         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3269                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3270                     __func__);
3271                 return FALSE;
3272         }
3273
3274         m->m_data = pkt->data + sizeof(*rx_res);
3275         m->m_pkthdr.len = m->m_len = len;
3276
3277         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3278             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3279
3280         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3281
3282         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3283             "%s: phy_info: channel=%d, flags=0x%08x\n",
3284             __func__,
3285             le16toh(phy_info->channel),
3286             le16toh(phy_info->phy_flags));
3287
3288         /*
3289          * Populate an RX state struct with the provided information.
3290          */
3291         bzero(&rxs, sizeof(rxs));
3292         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3293         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3294         rxs.c_ieee = le16toh(phy_info->channel);
3295         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3296                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3297         } else {
3298                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3299         }
3300         /* rssi is in 1/2db units */
3301         rxs.rssi = rssi * 2;
3302         rxs.nf = sc->sc_noise;
3303
3304         if (ieee80211_radiotap_active_vap(vap)) {
3305                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3306
3307                 tap->wr_flags = 0;
3308                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3309                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3310                 tap->wr_chan_freq = htole16(rxs.c_freq);
3311                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3312                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3313                 tap->wr_dbm_antsignal = (int8_t)rssi;
3314                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3315                 tap->wr_tsft = phy_info->system_timestamp;
3316                 switch (phy_info->rate) {
3317                 /* CCK rates. */
3318                 case  10: tap->wr_rate =   2; break;
3319                 case  20: tap->wr_rate =   4; break;
3320                 case  55: tap->wr_rate =  11; break;
3321                 case 110: tap->wr_rate =  22; break;
3322                 /* OFDM rates. */
3323                 case 0xd: tap->wr_rate =  12; break;
3324                 case 0xf: tap->wr_rate =  18; break;
3325                 case 0x5: tap->wr_rate =  24; break;
3326                 case 0x7: tap->wr_rate =  36; break;
3327                 case 0x9: tap->wr_rate =  48; break;
3328                 case 0xb: tap->wr_rate =  72; break;
3329                 case 0x1: tap->wr_rate =  96; break;
3330                 case 0x3: tap->wr_rate = 108; break;
3331                 /* Unknown rate: should not happen. */
3332                 default:  tap->wr_rate =   0;
3333                 }
3334         }
3335
3336         IWM_UNLOCK(sc);
3337         if (ni != NULL) {
3338                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3339                 ieee80211_input_mimo(ni, m, &rxs);
3340                 ieee80211_free_node(ni);
3341         } else {
3342                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3343                 ieee80211_input_mimo_all(ic, m, &rxs);
3344         }
3345         IWM_LOCK(sc);
3346
3347         return TRUE;
3348 }
3349
3350 static int
3351 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3352         struct iwm_node *in)
3353 {
3354         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3355         struct ieee80211_node *ni = &in->in_ni;
3356         struct ieee80211vap *vap = ni->ni_vap;
3357         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3358         int failack = tx_resp->failure_frame;
3359
3360         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3361
3362         /* Update rate control statistics. */
3363         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3364             __func__,
3365             (int) le16toh(tx_resp->status.status),
3366             (int) le16toh(tx_resp->status.sequence),
3367             tx_resp->frame_count,
3368             tx_resp->bt_kill_count,
3369             tx_resp->failure_rts,
3370             tx_resp->failure_frame,
3371             le32toh(tx_resp->initial_rate),
3372             (int) le16toh(tx_resp->wireless_media_time));
3373
3374         if (status != IWM_TX_STATUS_SUCCESS &&
3375             status != IWM_TX_STATUS_DIRECT_DONE) {
3376                 ieee80211_ratectl_tx_complete(vap, ni,
3377                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3378                 return (1);
3379         } else {
3380                 ieee80211_ratectl_tx_complete(vap, ni,
3381                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3382                 return (0);
3383         }
3384 }
3385
/*
 * Handle a TX command completion notification: reclaim the TX slot's
 * DMA mapping and mbuf, report the status to net80211, and restart
 * transmission if the ring drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
        struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
        int idx = cmd_hdr->idx;
        int qid = cmd_hdr->qid;
        struct iwm_tx_ring *ring = &sc->txq[qid];
        struct iwm_tx_data *txd = &ring->data[idx];
        struct iwm_node *in = txd->in;
        struct mbuf *m = txd->m;
        int status;

        KASSERT(txd->done == 0, ("txd not done"));
        KASSERT(txd->in != NULL, ("txd without node"));
        KASSERT(txd->m != NULL, ("txd without mbuf"));

        /* The queue made progress; reset the watchdog timer. */
        sc->sc_tx_timer = 0;

        status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

        /* Unmap and free mbuf. */
        bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->data_dmat, txd->map);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "free txd %p, in %p\n", txd, txd->in);
        txd->done = 1;
        txd->m = NULL;
        txd->in = NULL;

        /* Frees 'm' and updates the node's TX statistics/reference. */
        ieee80211_tx_complete(&in->in_ni, m, status);

        if (--ring->queued < IWM_TX_RING_LOMARK) {
                sc->qfullmsk &= ~(1 << ring->qid);
                /* Restart output only once every ring has drained. */
                if (sc->qfullmsk == 0) {
                        iwm_start(sc);
                }
        }
}
3425
3426 /*
3427  * transmit side
3428  */
3429
3430 /*
3431  * Process a "command done" firmware notification.  This is where we wakeup
3432  * processes waiting for a synchronous command completion.
3433  * from if_iwn
3434  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
        struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
        struct iwm_tx_data *data;

        /*
         * Only packets arriving on the command queue are command acks.
         * NOTE(review): wide (group-id) command responses may carry
         * extra bits in hdr.qid; confirm whether this check needs a
         * mask for those (cf. the IWM_FW_PAGING_BLOCK_CMD handling).
         */
        if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
                return; /* Not a command ack. */
        }

        data = &ring->data[pkt->hdr.idx];

        /* If the command was mapped in an mbuf, free it. */
        if (data->m != NULL) {
                bus_dmamap_sync(ring->data_dmat, data->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ring->data_dmat, data->map);
                m_freem(data->m);
                data->m = NULL;
        }
        /* Wake anyone sleeping in iwm_send_cmd() on this slot. */
        wakeup(&ring->desc[pkt->hdr.idx]);

        /* Commands should complete in order; warn if one was skipped. */
        if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
                device_printf(sc->sc_dev,
                    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
                    __func__, pkt->hdr.idx, ring->queued, ring->cur);
                /* XXX call iwm_force_nmi() */
        }

        KKASSERT(ring->queued > 0);
        ring->queued--;
        /* Last in-flight command done: release the cmd-in-flight state. */
        if (ring->queued == 0)
                iwm_pcie_clear_cmd_in_flight(sc);
}
3469
#if 0
/*
 * necessary only for block ack mode
 *
 * Write the byte count for (qid, idx) into the TX scheduler's
 * byte-count table so the firmware scheduler knows the frame length.
 * Currently compiled out; iwm_tx() has the matching disabled call.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
        uint16_t len)
{
        struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
        uint16_t w_val;

        scd_bc_tbl = sc->sched_dma.vaddr;

        /*
         * magic numbers came naturally from paris
         * (overhead added to the frame length; origin unclear)
         */
        len += 8;
        if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
                len = roundup(len, 4) / 4;

        w_val = htole16(sta_id << 12 | len);

        /* Update TX scheduler. */
        scd_bc_tbl[qid].tfd_offset[idx] = w_val;
        bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
            BUS_DMASYNC_PREWRITE);

        /*
         * Duplicate the entry past the ring end so the hardware can
         * read it when wrapping (why this is needed is not obvious).
         */
        if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
                scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
                bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
                    BUS_DMASYNC_PREWRITE);
        }
}
#endif
3502
3503 /*
3504  * Take an 802.11 (non-n) rate, find the relevant rate
3505  * table entry.  return the index into in_ridx[].
3506  *
3507  * The caller then uses that index back into in_ridx
3508  * to figure out the rate index programmed /into/
3509  * the firmware for this given node.
3510  */
3511 static int
3512 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3513     uint8_t rate)
3514 {
3515         int i;
3516         uint8_t r;
3517
3518         for (i = 0; i < nitems(in->in_ridx); i++) {
3519                 r = iwm_rates[in->in_ridx[i]].rate;
3520                 if (rate == r)
3521                         return (i);
3522         }
3523         /* XXX Return the first */
3524         /* XXX TODO: have it return the /lowest/ */
3525         return (0);
3526 }
3527
3528 /*
3529  * Fill in the rate related information for a transmit command.
3530  */
/*
 * Fill in the rate related information for a transmit command.
 *
 * For data frames the node's rate-control table is consulted; for all
 * other frames the lowest rate of the current operating band is used.
 * Sets tx->initial_rate_index, tx->tx_flags (STA_RATE) and
 * tx->rate_n_flags, and returns the chosen iwm_rate table entry.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
        struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211_node *ni = &in->in_ni;
        const struct iwm_rate *rinfo;
        int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        int ridx, rate_flags;

        tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
        tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

        /*
         * XXX TODO: everything about the rate selection here is terrible!
         */

        if (type == IEEE80211_FC0_TYPE_DATA) {
                int i;
                /* for data frames, use RS table */
                (void) ieee80211_ratectl_rate(ni, NULL, 0);
                i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
                ridx = in->in_ridx[i];

                /* This is the index into the programmed table */
                tx->initial_rate_index = i;
                tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
                    "%s: start with i=%d, txrate %d\n",
                    __func__, i, iwm_rates[ridx].rate);
        } else {
                /*
                 * For non-data, use the lowest supported rate for the given
                 * operational mode.
                 *
                 * Note: there may not be any rate control information available.
                 * This driver currently assumes if we're transmitting data
                 * frames, use the rate control table.  Grr.
                 *
                 * XXX TODO: use the configured rate for the traffic type!
                 * XXX TODO: this should be per-vap, not curmode; as we later
                 * on we'll want to handle off-channel stuff (eg TDLS).
                 */
                if (ic->ic_curmode == IEEE80211_MODE_11A) {
                        /*
                         * XXX this assumes the mode is either 11a or not 11a;
                         * definitely won't work for 11n.
                         */
                        ridx = IWM_RIDX_OFDM;
                } else {
                        ridx = IWM_RIDX_CCK;
                }
        }

        rinfo = &iwm_rates[ridx];

        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
            __func__, ridx,
            rinfo->rate,
            !! (IWM_RIDX_IS_CCK(ridx))
            );

        /* XXX TODO: hard-coded TX antenna? */
        rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
        if (IWM_RIDX_IS_CCK(ridx))
                rate_flags |= IWM_RATE_MCS_CCK_MSK;
        tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

        return rinfo;
}
3601
/* First TB (the start of the TX command) is always 16 bytes. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC queue 'ac'.
 *
 * Builds the iwm_tx_cmd in the ring's command buffer, performs software
 * crypto if required, DMA-maps the (header-trimmed) payload, fills the
 * TFD with the command TBs plus payload segments, and kicks the ring's
 * write pointer.  On error the mbuf is freed here; on success ownership
 * passes to the ring and is released in iwm_mvm_rx_tx_cmd().
 *
 * Returns 0 on success or an errno.  Called with the IWM lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_node *in = IWM_NODE(ni);
        struct iwm_tx_ring *ring;
        struct iwm_tx_data *data;
        struct iwm_tfd *desc;
        struct iwm_device_cmd *cmd;
        struct iwm_tx_cmd *tx;
        struct ieee80211_frame *wh;
        struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
        struct mbuf *m1;
#endif
        const struct iwm_rate *rinfo;
        uint32_t flags;
        u_int hdrlen;
        bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
        int nsegs;
        uint8_t tid, type;
        int i, totlen, error, pad;

        wh = mtod(m, struct ieee80211_frame *);
        hdrlen = ieee80211_anyhdrsize(wh);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        tid = 0;
        ring = &sc->txq[ac];
        desc = &ring->desc[ring->cur];
        memset(desc, 0, sizeof(*desc));
        data = &ring->data[ring->cur];

        /* Fill out iwm_tx_cmd to send to the firmware */
        cmd = &ring->cmd[ring->cur];
        cmd->hdr.code = IWM_TX_CMD;
        cmd->hdr.flags = 0;
        cmd->hdr.qid = ring->qid;
        cmd->hdr.idx = ring->cur;

        tx = (void *)cmd->data;
        memset(tx, 0, sizeof(*tx));

        rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

        /* Encrypt the frame if need be. */
        if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                /* Retrieve key for TX && do software encryption. */
                k = ieee80211_crypto_encap(ni, m);
                if (k == NULL) {
                        m_freem(m);
                        return (ENOBUFS);
                }
                /* 802.11 header may have moved. */
                wh = mtod(m, struct ieee80211_frame *);
        }

        if (ieee80211_radiotap_active_vap(vap)) {
                struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

                tap->wt_flags = 0;
                tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
                tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
                tap->wt_rate = rinfo->rate;
                if (k != NULL)
                        tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
                ieee80211_radiotap_tx(vap, m);
        }


        totlen = m->m_pkthdr.len;

        flags = 0;
        /* Unicast frames want an ACK from the peer. */
        if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_ACK;
        }

        /* RTS/CTS protection for long unicast data frames. */
        if (type == IEEE80211_FC0_TYPE_DATA
            && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
            && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
        }

        /* Non-data and multicast frames go out via the auxiliary station. */
        if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
            type != IEEE80211_FC0_TYPE_DATA)
                tx->sta_id = sc->sc_aux_sta.sta_id;
        else
                tx->sta_id = IWM_STATION_ID;

        if (type == IEEE80211_FC0_TYPE_MGT) {
                uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

                if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
                    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
                } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
                } else {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
                }
        } else {
                tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
        }

        if (hdrlen & 3) {
                /* First segment length must be a multiple of 4. */
                flags |= IWM_TX_CMD_FLG_MH_PAD;
                pad = 4 - (hdrlen & 3);
        } else
                pad = 0;

        tx->driver_txop = 0;
        tx->next_frame_len = 0;

        tx->len = htole16(totlen);
        tx->tid_tspec = tid;
        tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

        /* Set physical address of "scratch area". */
        tx->dram_lsb_ptr = htole32(data->scratch_paddr);
        tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

        /* Copy 802.11 header in TX command. */
        memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

        flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

        tx->sec_ctl = 0;
        tx->tx_flags |= htole32(flags);

        /* Trim 802.11 header. */
        /* (The header travels in the TX command; only payload is mapped.) */
        m_adj(m, hdrlen);
#if defined(__DragonFly__)
        error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
                                            segs, IWM_MAX_SCATTER - 2,
                                            &nsegs, BUS_DMA_NOWAIT);
#else
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
#endif
        if (error != 0) {
#if defined(__DragonFly__)
                device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                    error);
                m_freem(m);
                return error;
#else
                if (error != EFBIG) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
                /* Too many DMA segments, linearize mbuf. */
                m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
                if (m1 == NULL) {
                        device_printf(sc->sc_dev,
                            "%s: could not defrag mbuf\n", __func__);
                        m_freem(m);
                        return (ENOBUFS);
                }
                m = m1;

                error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
                    segs, &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
#endif
        }
        data->m = m;
        data->in = in;
        data->done = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending txd %p, in %p\n", data, data->in);
        KASSERT(data->in != NULL, ("node is NULL"));

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
            ring->qid, ring->cur, totlen, nsegs,
            le32toh(tx->tx_flags),
            le32toh(tx->rate_n_flags),
            tx->initial_rate_index
            );

        /* Fill TX descriptor. */
        /* TB0 + TB1 carry the command/header; payload segments follow. */
        desc->num_tbs = 2 + nsegs;

        desc->tbs[0].lo = htole32(data->cmd_paddr);
        desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            (TB0_SIZE << 4);
        desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
        desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
              + hdrlen + pad - TB0_SIZE) << 4);

        /* Other DMA segments are for data payload. */
        for (i = 0; i < nsegs; i++) {
                seg = &segs[i];
                desc->tbs[i+2].lo = htole32(seg->ds_addr);
                desc->tbs[i+2].hi_n_len = \
                    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
                    | ((seg->ds_len) << 4);
        }

        bus_dmamap_sync(ring->data_dmat, data->map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

#if 0
        iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

        /* Kick TX ring. */
        ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

        /* Mark TX ring as full if we reach a certain threshold. */
        if (++ring->queued > IWM_TX_RING_HIMARK) {
                sc->qfullmsk |= 1 << ring->qid;
        }

        return 0;
}
3834
3835 static int
3836 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3837     const struct ieee80211_bpf_params *params)
3838 {
3839         struct ieee80211com *ic = ni->ni_ic;
3840         struct iwm_softc *sc = ic->ic_softc;
3841         int error = 0;
3842
3843         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3844             "->%s begin\n", __func__);
3845
3846         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3847                 m_freem(m);
3848                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3849                     "<-%s not RUNNING\n", __func__);
3850                 return (ENETDOWN);
3851         }
3852
3853         IWM_LOCK(sc);
3854         /* XXX fix this */
3855         if (params == NULL) {
3856                 error = iwm_tx(sc, m, ni, 0);
3857         } else {
3858                 error = iwm_tx(sc, m, ni, 0);
3859         }
3860         sc->sc_tx_timer = 5;
3861         IWM_UNLOCK(sc);
3862
3863         return (error);
3864 }
3865
3866 /*
3867  * mvm/tx.c
3868  */
3869
3870 /*
3871  * Note that there are transports that buffer frames before they reach
3872  * the firmware. This means that after flush_tx_path is called, the
3873  * queue might not be empty. The race-free way to handle this is to:
3874  * 1) set the station as draining
3875  * 2) flush the Tx path
3876  * 3) wait for the transport queues to be empty
3877  */
3878 int
3879 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3880 {
3881         int ret;
3882         struct iwm_tx_path_flush_cmd flush_cmd = {
3883                 .queues_ctl = htole32(tfd_msk),
3884                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3885         };
3886
3887         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3888             sizeof(flush_cmd), &flush_cmd);
3889         if (ret)
3890                 device_printf(sc->sc_dev,
3891                     "Flushing tx queue failed: %d\n", ret);
3892         return ret;
3893 }
3894
/*
 * Distribute the firmware's scheduling-session quota among the active
 * bindings and push the result with IWM_TIME_QUOTA_CMD.
 *
 * sc  - softc
 * ivp - the (single) vap whose binding should receive quota, or NULL
 *       to mark every binding inactive.
 *
 * Returns 0 on success or the errno from the command submission.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	/* colors[i] < 0 marks binding i as unused. */
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	/* Number of interfaces on each binding. */
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (ivp) {
		id = ivp->phy_ctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = ivp->phy_ctxt->color;

		/* One vap per binding in this driver (hence the if (1)). */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		/* Default every slot to "invalid" before filling below. */
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill command slots, skipping bindings that are unused. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			/* 0 = no cap on the fragment duration. */
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}
3961
3962 /*
3963  * ieee80211 routines
3964  */
3965
3966 /*
3967  * Change to AUTH state in 80211 state machine.  Roughly matches what
3968  * Linux does in bss_info_changed().
3969  */
3970 static int
3971 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3972 {
3973         struct ieee80211_node *ni;
3974         struct iwm_node *in;
3975         struct iwm_vap *iv = IWM_VAP(vap);
3976         uint32_t duration;
3977         int error;
3978
3979         /*
3980          * XXX i have a feeling that the vap node is being
3981          * freed from underneath us. Grr.
3982          */
3983         ni = ieee80211_ref_node(vap->iv_bss);
3984         in = IWM_NODE(ni);
3985         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3986             "%s: called; vap=%p, bss ni=%p\n",
3987             __func__,
3988             vap,
3989             ni);
3990
3991         in->in_assoc = 0;
3992
3993         /*
3994          * Firmware bug - it'll crash if the beacon interval is less
3995          * than 16. We can't avoid connecting at all, so refuse the
3996          * station state change, this will cause net80211 to abandon
3997          * attempts to connect to this AP, and eventually wpa_s will
3998          * blacklist the AP...
3999          */
4000         if (ni->ni_intval < 16) {
4001                 device_printf(sc->sc_dev,
4002                     "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4003                     ether_sprintf(ni->ni_bssid), ni->ni_intval);
4004                 error = EINVAL;
4005                 goto out;
4006         }
4007
4008         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4009         if (error != 0)
4010                 return error;
4011
4012         error = iwm_allow_mcast(vap, sc);
4013         if (error) {
4014                 device_printf(sc->sc_dev,
4015                     "%s: failed to set multicast\n", __func__);
4016                 goto out;
4017         }
4018
4019         /*
4020          * This is where it deviates from what Linux does.
4021          *
4022          * Linux iwlwifi doesn't reset the nic each time, nor does it
4023          * call ctxt_add() here.  Instead, it adds it during vap creation,
4024          * and always does a mac_ctx_changed().
4025          *
4026          * The openbsd port doesn't attempt to do that - it reset things
4027          * at odd states and does the add here.
4028          *
4029          * So, until the state handling is fixed (ie, we never reset
4030          * the NIC except for a firmware failure, which should drag
4031          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4032          * contexts that are required), let's do a dirty hack here.
4033          */
4034         if (iv->is_uploaded) {
4035                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4036                         device_printf(sc->sc_dev,
4037                             "%s: failed to update MAC\n", __func__);
4038                         goto out;
4039                 }
4040         } else {
4041                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4042                         device_printf(sc->sc_dev,
4043                             "%s: failed to add MAC\n", __func__);
4044                         goto out;
4045                 }
4046         }
4047
4048         if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4049             in->in_ni.ni_chan, 1, 1)) != 0) {
4050                 device_printf(sc->sc_dev,
4051                     "%s: failed update phy ctxt\n", __func__);
4052                 goto out;
4053         }
4054         iv->phy_ctxt = &sc->sc_phyctxt[0];
4055
4056         if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4057                 device_printf(sc->sc_dev,
4058                     "%s: binding update cmd\n", __func__);
4059                 goto out;
4060         }
4061         /*
4062          * Authentication becomes unreliable when powersaving is left enabled
4063          * here. Powersaving will be activated again when association has
4064          * finished or is aborted.
4065          */
4066         iv->ps_disabled = TRUE;
4067         error = iwm_mvm_power_update_mac(sc);
4068         iv->ps_disabled = FALSE;
4069         if (error != 0) {
4070                 device_printf(sc->sc_dev,
4071                     "%s: failed to update power management\n",
4072                     __func__);
4073                 goto out;
4074         }
4075         if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4076                 device_printf(sc->sc_dev,
4077                     "%s: failed to add sta\n", __func__);
4078                 goto out;
4079         }
4080
4081         /*
4082          * Prevent the FW from wandering off channel during association
4083          * by "protecting" the session with a time event.
4084          */
4085         /* XXX duration is in units of TU, not MS */
4086         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4087         iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4088         DELAY(100);
4089
4090         error = 0;
4091 out:
4092         ieee80211_free_node(ni);
4093         return (error);
4094 }
4095
/*
 * Tear down the firmware association state when leaving RUN.
 *
 * Rather than unwinding the individual firmware contexts (which hangs
 * the device — see the comment below), this drains pending TX, flushes
 * the TX path, and performs a full device stop/re-init.  'in' may be
 * NULL; when non-NULL its association flag is cleared.  Always returns
 * 0 (the #if 0 region documents the intended, currently-broken path).
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, vap);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: kept as documentation of the proper teardown order. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4166
4167 static struct ieee80211_node *
4168 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4169 {
4170         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4171             M_INTWAIT | M_ZERO);
4172 }
4173
4174 uint8_t
4175 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4176 {
4177         int i;
4178         uint8_t rval;
4179
4180         for (i = 0; i < rs->rs_nrates; i++) {
4181                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4182                 if (rval == iwm_rates[ridx].rate)
4183                         return rs->rs_rates[i];
4184         }
4185
4186         return 0;
4187 }
4188
/*
 * Build the firmware link-quality (rate selection) command for a node.
 *
 * Translates the node's negotiated 802.11 rate set into hardware rate
 * indices (in->in_ridx[], highest rate first), then fills in->in_lq
 * with per-rate PLCP values, antenna selection and CCK flags.  The
 * caller is responsible for actually sending the IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out on rate sets the fixed-size lq table can't hold. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): on this path in_ridx[i] stays at the
			 * memset value (-1); the table-build loop below will
			 * then index iwm_rates[-1] — looks out-of-bounds,
			 * TODO confirm no such rate set can reach here.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Round-robin through the valid TX antennas. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4304
4305 static int
4306 iwm_media_change(struct ifnet *ifp)
4307 {
4308         struct ieee80211vap *vap = ifp->if_softc;
4309         struct ieee80211com *ic = vap->iv_ic;
4310         struct iwm_softc *sc = ic->ic_softc;
4311         int error;
4312
4313         error = ieee80211_media_change(ifp);
4314         if (error != ENETRESET)
4315                 return error;
4316
4317         IWM_LOCK(sc);
4318         if (ic->ic_nrunning > 0) {
4319                 iwm_stop(sc);
4320                 iwm_init(sc);
4321         }
4322         IWM_UNLOCK(sc);
4323         return error;
4324 }
4325
4326
/*
 * net80211 state-machine hook.
 *
 * Drops the 802.11 comlock and takes the driver lock for the duration
 * (the IEEE80211_UNLOCK/IWM_LOCK dance below is order-sensitive: the
 * comlock must be reacquired before calling back into net80211 and
 * before returning).  Handles the firmware-side work for each state
 * transition, then chains to the saved net80211 newstate handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan-activity LED once we leave SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Let net80211 process INIT first (under the
			 * comlock), then reset the device via
			 * iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		/*
		 * Abandoned AUTH/ASSOC: tear down the firmware station,
		 * MAC context, binding and power state, then return the
		 * result of net80211's own transition.
		 */
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
			ivp->phy_ctxt = NULL;
			error = iwm_mvm_power_update_mac(sc);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update power management\n",
				    __func__);
			}
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Build the rate table, then push it with IWM_LQ_CMD. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the net80211 handler saved at vap creation. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4506
4507 void
4508 iwm_endscan_cb(void *arg, int pending)
4509 {
4510         struct iwm_softc *sc = arg;
4511         struct ieee80211com *ic = &sc->sc_ic;
4512
4513         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4514             "%s: scan ended\n",
4515             __func__);
4516
4517         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4518 }
4519
4520 /*
4521  * Aging and idle timeouts for the different possible scenarios
4522  * in default configuration
4523  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row pairs an aging timer with an idle timer; the scenario
	 * each row covers is spelled out by its macro names. */
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4547
4548 /*
4549  * Aging and idle timeouts for the different possible scenarios
4550  * in single BSS MAC configuration.
4551  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Same layout as iwm_sf_full_timeout_def, but with the shorter
	 * timers used when associated to a single BSS. */
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4575
/*
 * Fill in a Smart Fifo configuration command.
 *
 * sf_cmd - command to populate (watermarks and timeout tables).
 * ni     - AP node when associating (selects the FULL_ON watermark
 *          from its antenna capabilities), or NULL for the
 *          unassociated defaults.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks disabled until rxmcs is wired up. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts use one fixed aging timer across the board. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: associated table vs. unassociated defaults. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4622
4623 static int
4624 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4625 {
4626         struct ieee80211com *ic = &sc->sc_ic;
4627         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4628         struct iwm_sf_cfg_cmd sf_cmd = {
4629                 .state = htole32(IWM_SF_FULL_ON),
4630         };
4631         int ret = 0;
4632
4633         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4634                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4635
4636         switch (new_state) {
4637         case IWM_SF_UNINIT:
4638         case IWM_SF_INIT_OFF:
4639                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4640                 break;
4641         case IWM_SF_FULL_ON:
4642                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4643                 break;
4644         default:
4645                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4646                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4647                           new_state);
4648                 return EINVAL;
4649         }
4650
4651         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4652                                    sizeof(sf_cmd), &sf_cmd);
4653         return ret;
4654 }
4655
4656 static int
4657 iwm_send_bt_init_conf(struct iwm_softc *sc)
4658 {
4659         struct iwm_bt_coex_cmd bt_cmd;
4660
4661         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4662         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4663
4664         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4665             &bt_cmd);
4666 }
4667
4668 static int
4669 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4670 {
4671         struct iwm_mcc_update_cmd mcc_cmd;
4672         struct iwm_host_cmd hcmd = {
4673                 .id = IWM_MCC_UPDATE_CMD,
4674                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4675                 .data = { &mcc_cmd },
4676         };
4677         int ret;
4678 #ifdef IWM_DEBUG
4679         struct iwm_rx_packet *pkt;
4680         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4681         struct iwm_mcc_update_resp *mcc_resp;
4682         int n_channels;
4683         uint16_t mcc;
4684 #endif
4685         int resp_v2 = fw_has_capa(&sc->ucode_capa,
4686             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4687
4688         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4689         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4690         if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4691             fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4692                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4693         else
4694                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4695
4696         if (resp_v2)
4697                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4698         else
4699                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4700
4701         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4702             "send MCC update to FW with '%c%c' src = %d\n",
4703             alpha2[0], alpha2[1], mcc_cmd.source_id);
4704
4705         ret = iwm_send_cmd(sc, &hcmd);
4706         if (ret)
4707                 return ret;
4708
4709 #ifdef IWM_DEBUG
4710         pkt = hcmd.resp_pkt;
4711
4712         /* Extract MCC response */
4713         if (resp_v2) {
4714                 mcc_resp = (void *)pkt->data;
4715                 mcc = mcc_resp->mcc;
4716                 n_channels =  le32toh(mcc_resp->n_channels);
4717         } else {
4718                 mcc_resp_v1 = (void *)pkt->data;
4719                 mcc = mcc_resp_v1->mcc;
4720                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4721         }
4722
4723         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4724         if (mcc == 0)
4725                 mcc = 0x3030;  /* "00" - world */
4726
4727         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4728             "regulatory domain '%c%c' (%d channels available)\n",
4729             mcc >> 8, mcc & 0xff, n_channels);
4730 #endif
4731         iwm_free_resp(sc, &hcmd);
4732
4733         return 0;
4734 }
4735
4736 static void
4737 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4738 {
4739         struct iwm_host_cmd cmd = {
4740                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4741                 .len = { sizeof(uint32_t), },
4742                 .data = { &backoff, },
4743         };
4744
4745         if (iwm_send_cmd(sc, &cmd) != 0) {
4746                 device_printf(sc->sc_dev,
4747                     "failed to change thermal tx backoff\n");
4748         }
4749 }
4750
4751 static int
4752 iwm_init_hw(struct iwm_softc *sc)
4753 {
4754         struct ieee80211com *ic = &sc->sc_ic;
4755         int error, i, ac;
4756
4757         if ((error = iwm_start_hw(sc)) != 0) {
4758                 kprintf("iwm_start_hw: failed %d\n", error);
4759                 return error;
4760         }
4761
4762         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4763                 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4764                 return error;
4765         }
4766
4767         /*
4768          * should stop and start HW since that INIT
4769          * image just loaded
4770          */
4771         iwm_stop_device(sc);
4772         sc->sc_ps_disabled = FALSE;
4773         if ((error = iwm_start_hw(sc)) != 0) {
4774                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4775                 return error;
4776         }
4777
4778         /* omstart, this time with the regular firmware */
4779         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4780         if (error) {
4781                 device_printf(sc->sc_dev, "could not load firmware\n");
4782                 goto error;
4783         }
4784
4785         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4786                 device_printf(sc->sc_dev, "bt init conf failed\n");
4787                 goto error;
4788         }
4789
4790         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4791         if (error != 0) {
4792                 device_printf(sc->sc_dev, "antenna config failed\n");
4793                 goto error;
4794         }
4795
4796         /* Send phy db control command and then phy db calibration */
4797         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4798                 goto error;
4799
4800         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4801                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4802                 goto error;
4803         }
4804
4805         /* Add auxiliary station for scanning */
4806         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4807                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4808                 goto error;
4809         }
4810
4811         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4812                 /*
4813                  * The channel used here isn't relevant as it's
4814                  * going to be overwritten in the other flows.
4815                  * For now use the first channel we have.
4816                  */
4817                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4818                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4819                         goto error;
4820         }
4821
4822         /* Initialize tx backoffs to the minimum. */
4823         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4824                 iwm_mvm_tt_tx_backoff(sc, 0);
4825
4826         error = iwm_mvm_power_update_device(sc);
4827         if (error)
4828                 goto error;
4829
4830         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4831                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4832                         goto error;
4833         }
4834
4835         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4836                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4837                         goto error;
4838         }
4839
4840         /* Enable Tx queues. */
4841         for (ac = 0; ac < WME_NUM_AC; ac++) {
4842                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4843                     iwm_mvm_ac_to_tx_fifo[ac]);
4844                 if (error)
4845                         goto error;
4846         }
4847
4848         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4849                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4850                 goto error;
4851         }
4852
4853         return 0;
4854
4855  error:
4856         iwm_stop_device(sc);
4857         return error;
4858 }
4859
4860 /* Allow multicast from our BSSID. */
4861 static int
4862 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4863 {
4864         struct ieee80211_node *ni = vap->iv_bss;
4865         struct iwm_mcast_filter_cmd *cmd;
4866         size_t size;
4867         int error;
4868
4869         size = roundup(sizeof(*cmd), 4);
4870         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4871         if (cmd == NULL)
4872                 return ENOMEM;
4873         cmd->filter_own = 1;
4874         cmd->port_id = 0;
4875         cmd->count = 0;
4876         cmd->pass_all = 1;
4877         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4878
4879         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4880             IWM_CMD_SYNC, size, cmd);
4881         kfree(cmd, M_DEVBUF);
4882
4883         return (error);
4884 }
4885
4886 /*
4887  * ifnet interfaces
4888  */
4889
4890 static void
4891 iwm_init(struct iwm_softc *sc)
4892 {
4893         int error;
4894
4895         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4896                 return;
4897         }
4898         sc->sc_generation++;
4899         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4900
4901         if ((error = iwm_init_hw(sc)) != 0) {
4902                 kprintf("iwm_init_hw failed %d\n", error);
4903                 iwm_stop(sc);
4904                 return;
4905         }
4906
4907         /*
4908          * Ok, firmware loaded and we are jogging
4909          */
4910         sc->sc_flags |= IWM_FLAG_HW_INITED;
4911         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4912 }
4913
4914 static int
4915 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4916 {
4917         struct iwm_softc *sc;
4918         int error;
4919
4920         sc = ic->ic_softc;
4921
4922         IWM_LOCK(sc);
4923         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4924                 IWM_UNLOCK(sc);
4925                 return (ENXIO);
4926         }
4927         error = mbufq_enqueue(&sc->sc_snd, m);
4928         if (error) {
4929                 IWM_UNLOCK(sc);
4930                 return (error);
4931         }
4932         iwm_start(sc);
4933         IWM_UNLOCK(sc);
4934         return (0);
4935 }
4936
4937 /*
4938  * Dequeue packets from sendq and call send.
4939  */
4940 static void
4941 iwm_start(struct iwm_softc *sc)
4942 {
4943         struct ieee80211_node *ni;
4944         struct mbuf *m;
4945         int ac = 0;
4946
4947         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4948         while (sc->qfullmsk == 0 &&
4949                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4950                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4951                 if (iwm_tx(sc, m, ni, ac) != 0) {
4952                         if_inc_counter(ni->ni_vap->iv_ifp,
4953                             IFCOUNTER_OERRORS, 1);
4954                         ieee80211_free_node(ni);
4955                         continue;
4956                 }
4957                 sc->sc_tx_timer = 15;
4958         }
4959         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4960 }
4961
/*
 * Tear the interface down: clear the inited flag, mark the softc
 * stopped, stop the LED blink callout and the tx watchdog, and shut
 * the device off.  The flag/generation updates happen before
 * iwm_stop_device() so other code paths observe the stop first.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

        sc->sc_flags &= ~IWM_FLAG_HW_INITED;
        sc->sc_flags |= IWM_FLAG_STOPPED;
        /* New epoch: iwm_init() also bumps this on the way back up. */
        sc->sc_generation++;
        iwm_led_blink_stop(sc);
        sc->sc_tx_timer = 0;
        iwm_stop_device(sc);
        /* Any scan state is stale once the device is stopped. */
        sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4974
4975 static void
4976 iwm_watchdog(void *arg)
4977 {
4978         struct iwm_softc *sc = arg;
4979
4980         if (sc->sc_tx_timer > 0) {
4981                 if (--sc->sc_tx_timer == 0) {
4982                         device_printf(sc->sc_dev, "device timeout\n");
4983 #ifdef IWM_DEBUG
4984                         iwm_nic_error(sc);
4985 #endif
4986                         iwm_stop(sc);
4987 #if defined(__DragonFly__)
4988                         ++sc->sc_ic.ic_oerrors;
4989 #else
4990                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4991 #endif
4992                         return;
4993                 }
4994         }
4995         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4996 }
4997
4998 static void
4999 iwm_parent(struct ieee80211com *ic)
5000 {
5001         struct iwm_softc *sc = ic->ic_softc;
5002         int startall = 0;
5003
5004         IWM_LOCK(sc);
5005         if (ic->ic_nrunning > 0) {
5006                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5007                         iwm_init(sc);
5008                         startall = 1;
5009                 }
5010         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5011                 iwm_stop(sc);
5012         IWM_UNLOCK(sc);
5013         if (startall)
5014                 ieee80211_start_all(ic);
5015 }
5016
5017 /*
5018  * The interrupt side of things
5019  */
5020
5021 /*
5022  * error dumping routines are from iwlwifi/mvm/utils.c
5023  */
5024
5025 /*
5026  * Note: This structure is read from the device with IO accesses,
5027  * and the reading already does the endian conversion. As it is
5028  * read with uint32_t-sized accesses, any members with a different size
5029  * need to be ordered correctly though!
5030  */
struct iwm_error_event_table {
        /* Layout must match the firmware's error table word-for-word. */
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t trm_hw_status0;        /* TRM HW status */
        uint32_t trm_hw_status1;        /* TRM HW status */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t fw_rev_type;   /* firmware revision type */
        uint32_t major;         /* uCode version major */
        uint32_t minor;         /* uCode version minor */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* date and time of the uCode build */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5078
5079 /*
5080  * UMAC error struct - relevant starting from family 8000 chip.
5081  * Note: This structure is read from the device with IO accesses,
5082  * and the reading already does the endian conversion. As it is
5083  * read with u32-sized accesses, any members with a different size
5084  * need to be ordered correctly though!
5085  */
struct iwm_umac_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;      /* type of error */
        uint32_t blink1;        /* branch link */
        uint32_t blink2;        /* branch link */
        uint32_t ilink1;        /* interrupt link */
        uint32_t ilink2;        /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t umac_major;    /* UMAC uCode version, major part */
        uint32_t umac_minor;    /* UMAC uCode version, minor part */
        uint32_t frame_pointer; /* core register 27*/
        uint32_t stack_pointer; /* core register 28 */
        uint32_t cmd_header;    /* latest host cmd sent to UMAC */
        uint32_t nic_isr_pref;  /* ISR status register */
} __packed;
5103
/* Geometry of the firmware error log dump; values are byte counts. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5106
5107 #ifdef IWM_DEBUG
/*
 * Human-readable names for firmware error_id codes; the terminal
 * "ADVANCED_SYSASSERT" entry is the fallback for unknown codes (see
 * iwm_desc_lookup()).  Made static const: it is only used within this
 * file and must never be modified.
 */
static const struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
5129
5130 static const char *
5131 iwm_desc_lookup(uint32_t num)
5132 {
5133         int i;
5134
5135         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5136                 if (advanced_lookup[i].num == num)
5137                         return advanced_lookup[i].name;
5138
5139         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5140         return advanced_lookup[i].name;
5141 }
5142
/*
 * Dump the UMAC error event table (relevant for family 8000+ parts,
 * per the struct's description above) to the console.  Called from
 * iwm_nic_error() when sc->umac_error_event_table is set.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
        struct iwm_umac_error_event_table table;
        uint32_t base;

        base = sc->umac_error_event_table;

        /* The firmware-provided pointer must lie in device SRAM. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
                    base);
                return;
        }

        /* NOTE(review): length arg looks like a dword count — matches
         * the identical call in iwm_nic_error(). */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
            table.ilink1);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
            table.ilink2);
        device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
        device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
        device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
        device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
        device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
        device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
            table.frame_pointer);
        device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
            table.stack_pointer);
        device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
        device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
            table.nic_isr_pref);
}
5189
5190 /*
5191  * Support for dumping the error log seemed like a good idea ...
5192  * but it's mostly hex junk and the only sensible thing is the
5193  * hw/ucode revision (which we know anyway).  Since it's here,
5194  * I'll just leave it in, just in case e.g. the Intel guys want to
5195  * help us decipher some "ADVANCED_SYSASSERT" later.
5196  */
/*
 * Dump the (LMAC) error event table to the console, then chain to the
 * UMAC table dump if the firmware advertised one.  Called from the
 * watchdog on a device timeout (IWM_DEBUG builds only).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        base = sc->error_event_table;
        /* The firmware-provided pointer must lie in device SRAM. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev,
                    "Invalid error log pointer 0x%08x\n", base);
                return;
        }

        /* NOTE(review): length arg looks like a dword count — the
         * table is read with uint32_t-sized accesses (see struct). */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
            iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
            table.trm_hw_status0);
        device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
            table.trm_hw_status1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | uCode revision type\n",
            table.fw_rev_type);
        device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
        device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

        /* Family 8000+ firmware also provides a UMAC error table. */
        if (sc->umac_error_event_table)
                iwm_nic_umac_error(sc);
}
5269 #endif
5270
5271 static void
5272 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5273 {
5274         struct ieee80211com *ic = &sc->sc_ic;
5275         struct iwm_cmd_response *cresp;
5276         struct mbuf *m1;
5277         uint32_t offset = 0;
5278         uint32_t maxoff = IWM_RBUF_SIZE;
5279         uint32_t nextoff;
5280         boolean_t stolen = FALSE;
5281
5282 #define HAVEROOM(a)     \
5283     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5284
5285         while (HAVEROOM(offset)) {
5286                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5287                     offset);
5288                 int qid, idx, code, len;
5289
5290                 qid = pkt->hdr.qid;
5291                 idx = pkt->hdr.idx;
5292
5293                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5294
5295                 /*
5296                  * randomly get these from the firmware, no idea why.
5297                  * they at least seem harmless, so just ignore them for now
5298                  */
5299                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5300                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5301                         break;
5302                 }
5303
5304                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5305                     "rx packet qid=%d idx=%d type=%x\n",
5306                     qid & ~0x80, pkt->hdr.idx, code);
5307
5308                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5309                 len += sizeof(uint32_t); /* account for status word */
5310                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5311
5312                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5313
5314                 switch (code) {
5315                 case IWM_REPLY_RX_PHY_CMD:
5316                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5317                         break;
5318
5319                 case IWM_REPLY_RX_MPDU_CMD: {
5320                         /*
5321                          * If this is the last frame in the RX buffer, we
5322                          * can directly feed the mbuf to the sharks here.
5323                          */
5324                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5325                             struct iwm_rx_packet *, nextoff);
5326                         if (!HAVEROOM(nextoff) ||
5327                             (nextpkt->hdr.code == 0 &&
5328                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5329                              nextpkt->hdr.idx == 0) ||
5330                             (nextpkt->len_n_flags ==
5331                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5332                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5333                                         stolen = FALSE;
5334                                         /* Make sure we abort the loop */
5335                                         nextoff = maxoff;
5336                                 }
5337                                 break;
5338                         }
5339
5340                         /*
5341                          * Use m_copym instead of m_split, because that
5342                          * makes it easier to keep a valid rx buffer in
5343                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5344                          *
5345                          * We need to start m_copym() at offset 0, to get the
5346                          * M_PKTHDR flag preserved.
5347                          */
5348                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5349                         if (m1) {
5350                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5351                                         stolen = TRUE;
5352                                 else
5353                                         m_freem(m1);
5354                         }
5355                         break;
5356                 }
5357
5358                 case IWM_TX_CMD:
5359                         iwm_mvm_rx_tx_cmd(sc, pkt);
5360                         break;
5361
5362                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5363                         struct iwm_missed_beacons_notif *resp;
5364                         int missed;
5365
5366                         /* XXX look at mac_id to determine interface ID */
5367                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5368
5369                         resp = (void *)pkt->data;
5370                         missed = le32toh(resp->consec_missed_beacons);
5371
5372                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5373                             "%s: MISSED_BEACON: mac_id=%d, "
5374                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5375                             "num_rx=%d\n",
5376                             __func__,
5377                             le32toh(resp->mac_id),
5378                             le32toh(resp->consec_missed_beacons_since_last_rx),
5379                             le32toh(resp->consec_missed_beacons),
5380                             le32toh(resp->num_expected_beacons),
5381                             le32toh(resp->num_recvd_beacons));
5382
5383                         /* Be paranoid */
5384                         if (vap == NULL)
5385                                 break;
5386
5387                         /* XXX no net80211 locking? */
5388                         if (vap->iv_state == IEEE80211_S_RUN &&
5389                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5390                                 if (missed > vap->iv_bmissthreshold) {
5391                                         /* XXX bad locking; turn into task */
5392                                         IWM_UNLOCK(sc);
5393                                         ieee80211_beacon_miss(ic);
5394                                         IWM_LOCK(sc);
5395                                 }
5396                         }
5397
5398                         break; }
5399
5400                 case IWM_MFUART_LOAD_NOTIFICATION:
5401                         break;
5402
5403                 case IWM_MVM_ALIVE:
5404                         break;
5405
5406                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5407                         break;
5408
5409                 case IWM_STATISTICS_NOTIFICATION: {
5410                         struct iwm_notif_statistics *stats;
5411                         stats = (void *)pkt->data;
5412                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5413                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5414                         break;
5415                 }
5416
5417                 case IWM_NVM_ACCESS_CMD:
5418                 case IWM_MCC_UPDATE_CMD:
5419                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5420                                 memcpy(sc->sc_cmd_resp,
5421                                     pkt, sizeof(sc->sc_cmd_resp));
5422                         }
5423                         break;
5424
5425                 case IWM_MCC_CHUB_UPDATE_CMD: {
5426                         struct iwm_mcc_chub_notif *notif;
5427                         notif = (void *)pkt->data;
5428
5429                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5430                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5431                         sc->sc_fw_mcc[2] = '\0';
5432                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5433                             "fw source %d sent CC '%s'\n",
5434                             notif->source_id, sc->sc_fw_mcc);
5435                         break;
5436                 }
5437
5438                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5439                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5440                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5441                         struct iwm_dts_measurement_notif_v1 *notif;
5442
5443                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5444                                 device_printf(sc->sc_dev,
5445                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5446                                 break;
5447                         }
5448                         notif = (void *)pkt->data;
5449                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5450                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5451                             notif->temp);
5452                         break;
5453                 }
5454
5455                 case IWM_PHY_CONFIGURATION_CMD:
5456                 case IWM_TX_ANT_CONFIGURATION_CMD:
5457                 case IWM_ADD_STA:
5458                 case IWM_MAC_CONTEXT_CMD:
5459                 case IWM_REPLY_SF_CFG_CMD:
5460                 case IWM_POWER_TABLE_CMD:
5461                 case IWM_PHY_CONTEXT_CMD:
5462                 case IWM_BINDING_CONTEXT_CMD:
5463                 case IWM_TIME_EVENT_CMD:
5464                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5465                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5466                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5467                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5468                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5469                 case IWM_REPLY_BEACON_FILTERING_CMD:
5470                 case IWM_MAC_PM_POWER_TABLE:
5471                 case IWM_TIME_QUOTA_CMD:
5472                 case IWM_REMOVE_STA:
5473                 case IWM_TXPATH_FLUSH:
5474                 case IWM_LQ_CMD:
5475                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5476                                  IWM_FW_PAGING_BLOCK_CMD):
5477                 case IWM_BT_CONFIG:
5478                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5479                         cresp = (void *)pkt->data;
5480                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5481                                 memcpy(sc->sc_cmd_resp,
5482                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5483                         }
5484                         break;
5485
5486                 /* ignore */
5487                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5488                         break;
5489
5490                 case IWM_INIT_COMPLETE_NOTIF:
5491                         break;
5492
5493                 case IWM_SCAN_OFFLOAD_COMPLETE:
5494                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5495                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5496                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5497                                 ieee80211_runtask(ic, &sc->sc_es_task);
5498                         }
5499                         break;
5500
5501                 case IWM_SCAN_ITERATION_COMPLETE: {
5502                         struct iwm_lmac_scan_complete_notif *notif;
5503                         notif = (void *)pkt->data;
5504                         break;
5505                 }
5506
5507                 case IWM_SCAN_COMPLETE_UMAC:
5508                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5509                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5510                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5511                                 ieee80211_runtask(ic, &sc->sc_es_task);
5512                         }
5513                         break;
5514
5515                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5516                         struct iwm_umac_scan_iter_complete_notif *notif;
5517                         notif = (void *)pkt->data;
5518
5519                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5520                             "complete, status=0x%x, %d channels scanned\n",
5521                             notif->status, notif->scanned_channels);
5522                         break;
5523                 }
5524
5525                 case IWM_REPLY_ERROR: {
5526                         struct iwm_error_resp *resp;
5527                         resp = (void *)pkt->data;
5528
5529                         device_printf(sc->sc_dev,
5530                             "firmware error 0x%x, cmd 0x%x\n",
5531                             le32toh(resp->error_type),
5532                             resp->cmd_id);
5533                         break;
5534                 }
5535
5536                 case IWM_TIME_EVENT_NOTIFICATION: {
5537                         struct iwm_time_event_notif *notif;
5538                         notif = (void *)pkt->data;
5539
5540                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5541                             "TE notif status = 0x%x action = 0x%x\n",
5542                             notif->status, notif->action);
5543                         break;
5544                 }
5545
5546                 case IWM_MCAST_FILTER_CMD:
5547                         break;
5548
5549                 case IWM_SCD_QUEUE_CFG: {
5550                         struct iwm_scd_txq_cfg_rsp *rsp;
5551                         rsp = (void *)pkt->data;
5552
5553                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5554                             "queue cfg token=0x%x sta_id=%d "
5555                             "tid=%d scd_queue=%d\n",
5556                             rsp->token, rsp->sta_id, rsp->tid,
5557                             rsp->scd_queue);
5558                         break;
5559                 }
5560
5561                 default:
5562                         device_printf(sc->sc_dev,
5563                             "frame %d/%d %x UNHANDLED (this should "
5564                             "not happen)\n", qid & ~0x80, idx,
5565                             pkt->len_n_flags);
5566                         break;
5567                 }
5568
5569                 /*
5570                  * Why test bit 0x80?  The Linux driver:
5571                  *
5572                  * There is one exception:  uCode sets bit 15 when it
5573                  * originates the response/notification, i.e. when the
5574                  * response/notification is not a direct response to a
5575                  * command sent by the driver.  For example, uCode issues
5576                  * IWM_REPLY_RX when it sends a received frame to the driver;
5577                  * it is not a direct response to any driver command.
5578                  *
5579                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5580                  * uses a slightly different format for pkt->hdr, and "qid"
5581                  * is actually the upper byte of a two-byte field.
5582                  */
5583                 if (!(qid & (1 << 7)))
5584                         iwm_cmd_done(sc, pkt);
5585
5586                 offset = nextoff;
5587         }
5588         if (stolen)
5589                 m_freem(m);
5590 #undef HAVEROOM
5591 }
5592
5593 /*
5594  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5595  * Basic structure from if_iwn
5596  */
5597 static void
5598 iwm_notif_intr(struct iwm_softc *sc)
5599 {
5600         uint16_t hw;
5601
5602         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5603             BUS_DMASYNC_POSTREAD);
5604
5605         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5606
5607         /*
5608          * Process responses
5609          */
5610         while (sc->rxq.cur != hw) {
5611                 struct iwm_rx_ring *ring = &sc->rxq;
5612                 struct iwm_rx_data *data = &ring->data[ring->cur];
5613
5614                 bus_dmamap_sync(ring->data_dmat, data->map,
5615                     BUS_DMASYNC_POSTREAD);
5616
5617                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5618                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5619                 iwm_handle_rxb(sc, data->m);
5620
5621                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5622         }
5623
5624         /*
5625          * Tell the firmware that it can reuse the ring entries that
5626          * we have just processed.
5627          * Seems like the hardware gets upset unless we align
5628          * the write by 8??
5629          */
5630         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5631         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5632 }
5633
/*
 * Main interrupt handler.  Gathers the interrupt cause bits either from
 * the ICT (interrupt cause table) DMA area or directly from the CSR
 * registers, acknowledges them, and dispatches to the individual
 * handlers (SW/HW error, firmware chunk done, rfkill, RX).
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* A late interrupt can fire after detach released the BAR. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() on a value read from DMA memory
		 * looks like it should be le32toh(); they are identical on
		 * little-endian hosts — confirm before running big-endian.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the slot so it is not processed twice. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the compressed ICT bits back into CSR_INT layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we observed (restricted to our mask). */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	/* Firmware (software) error: dump state and restart all VAPs. */
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	/* Unrecoverable hardware error: take the device down. */
	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake up iwm_firmware_load_chunk() waiters. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5793
5794 /*
5795  * Autoconf glue-sniffing
5796  */
/* PCI vendor/device IDs of the adapters handled by this driver. */
#define PCI_VENDOR_INTEL		0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

/*
 * Table mapping each supported PCI device ID to its per-chip
 * configuration.  Consulted by iwm_probe() and iwm_dev_check().
 * Note: 7265 vs. 7265D share PCI IDs and are disambiguated later by
 * hardware revision in iwm_attach().
 */
static const struct iwm_devices {
	uint16_t		device;		/* PCI device ID */
	const struct iwm_cfg	*cfg;		/* matching chip config */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5824
5825 static int
5826 iwm_probe(device_t dev)
5827 {
5828         int i;
5829
5830         for (i = 0; i < nitems(iwm_devices); i++) {
5831                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5832                     pci_get_device(dev) == iwm_devices[i].device) {
5833                         device_set_desc(dev, iwm_devices[i].cfg->name);
5834                         return (BUS_PROBE_DEFAULT);
5835                 }
5836         }
5837
5838         return (ENXIO);
5839 }
5840
5841 static int
5842 iwm_dev_check(device_t dev)
5843 {
5844         struct iwm_softc *sc;
5845         uint16_t devid;
5846         int i;
5847
5848         sc = device_get_softc(dev);
5849
5850         devid = pci_get_device(dev);
5851         for (i = 0; i < NELEM(iwm_devices); i++) {
5852                 if (iwm_devices[i].device == devid) {
5853                         sc->cfg = iwm_devices[i].cfg;
5854                         return (0);
5855                 }
5856         }
5857         device_printf(dev, "unknown adapter type\n");
5858         return ENXIO;
5859 }
5860
5861 /* PCI registers */
5862 #define PCI_CFG_RETRY_TIMEOUT   0x041
5863
5864 static int
5865 iwm_pci_attach(device_t dev)
5866 {
5867         struct iwm_softc *sc;
5868         int count, error, rid;
5869         uint16_t reg;
5870 #if defined(__DragonFly__)
5871         int irq_flags;
5872 #endif
5873
5874         sc = device_get_softc(dev);
5875
5876         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5877          * PCI Tx retries from interfering with C3 CPU state */
5878         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5879
5880         /* Enable bus-mastering and hardware bug workaround. */
5881         pci_enable_busmaster(dev);
5882         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5883         /* if !MSI */
5884         if (reg & PCIM_STATUS_INTxSTATE) {
5885                 reg &= ~PCIM_STATUS_INTxSTATE;
5886         }
5887         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5888
5889         rid = PCIR_BAR(0);
5890         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5891             RF_ACTIVE);
5892         if (sc->sc_mem == NULL) {
5893                 device_printf(sc->sc_dev, "can't map mem space\n");
5894                 return (ENXIO);
5895         }
5896         sc->sc_st = rman_get_bustag(sc->sc_mem);
5897         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5898
5899         /* Install interrupt handler. */
5900         count = 1;
5901         rid = 0;
5902 #if defined(__DragonFly__)
5903         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5904         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5905 #else
5906         if (pci_alloc_msi(dev, &count) == 0)
5907                 rid = 1;
5908         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5909             (rid != 0 ? 0 : RF_SHAREABLE));
5910 #endif
5911         if (sc->sc_irq == NULL) {
5912                 device_printf(dev, "can't map interrupt\n");
5913                         return (ENXIO);
5914         }
5915 #if defined(__DragonFly__)
5916         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5917                                iwm_intr, sc, &sc->sc_ih,
5918                                &wlan_global_serializer);
5919 #else
5920         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5921             NULL, iwm_intr, sc, &sc->sc_ih);
5922 #endif
5923         if (sc->sc_ih == NULL) {
5924                 device_printf(dev, "can't establish interrupt");
5925 #if defined(__DragonFly__)
5926                 pci_release_msi(dev);
5927 #endif
5928                         return (ENXIO);
5929         }
5930         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5931
5932         return (0);
5933 }
5934
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler and release
 * the IRQ and memory BAR resources.  Safe to call with partially
 * initialized state (each resource is checked before release).
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		/* Teardown must precede releasing the IRQ resource. */
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Mark released so a late iwm_intr() bails out. */
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
5957
5958
5959
/*
 * Device attach: initialize software state (locks, queues, callouts),
 * map the device via iwm_pci_attach(), identify the exact chip variant,
 * allocate all DMA areas and rings, and set up the ieee80211com.
 * Firmware load and NIC bring-up are deferred to iwm_preinit() via a
 * config intrhook.  On any failure everything is torn down through
 * iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Reset all PHY contexts to unused. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load until interrupts are available. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6165
6166 static int
6167 iwm_is_valid_ether_addr(uint8_t *addr)
6168 {
6169         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6170
6171         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6172                 return (FALSE);
6173
6174         return (TRUE);
6175 }
6176
6177 static int
6178 iwm_wme_update(struct ieee80211com *ic)
6179 {
6180 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6181         struct iwm_softc *sc = ic->ic_softc;
6182         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6183         struct iwm_vap *ivp = IWM_VAP(vap);
6184         struct iwm_node *in;
6185         struct wmeParams tmp[WME_NUM_AC];
6186         int aci, error;
6187
6188         if (vap == NULL)
6189                 return (0);
6190
6191         IEEE80211_LOCK(ic);
6192         for (aci = 0; aci < WME_NUM_AC; aci++)
6193                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6194         IEEE80211_UNLOCK(ic);
6195
6196         IWM_LOCK(sc);
6197         for (aci = 0; aci < WME_NUM_AC; aci++) {
6198                 const struct wmeParams *ac = &tmp[aci];
6199                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6200                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6201                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6202                 ivp->queue_params[aci].edca_txop =
6203                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6204         }
6205         ivp->have_wme = TRUE;
6206         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6207                 in = IWM_NODE(vap->iv_bss);
6208                 if (in->in_assoc) {
6209                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6210                                 device_printf(sc->sc_dev,
6211                                     "%s: failed to update MAC\n", __func__);
6212                         }
6213                 }
6214         }
6215         IWM_UNLOCK(sc);
6216
6217         return (0);
6218 #undef IWM_EXP2
6219 }
6220
/*
 * Deferred attach stage, run from the config intrhook established in
 * attach (i.e. once interrupts are available).  Brings the hardware up
 * once to run the init firmware and read NVM data, powers it back down,
 * then attaches the net80211 state and installs the driver callbacks.
 * On any failure the whole device is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once (to read NVM etc.), then power down. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	/* Populate the channel list from the NVM-derived capabilities. */
	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Successful or not, the intrhook has served its purpose. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6291
6292 /*
6293  * Attach the interface to 802.11 radiotap.
6294  */
6295 static void
6296 iwm_radiotap_attach(struct iwm_softc *sc)
6297 {
6298         struct ieee80211com *ic = &sc->sc_ic;
6299
6300         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6301             "->%s begin\n", __func__);
6302         ieee80211_radiotap_attach(ic,
6303             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6304                 IWM_TX_RADIOTAP_PRESENT,
6305             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6306                 IWM_RX_RADIOTAP_PRESENT);
6307         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6308             "->%s end\n", __func__);
6309 }
6310
6311 static struct ieee80211vap *
6312 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6313     enum ieee80211_opmode opmode, int flags,
6314     const uint8_t bssid[IEEE80211_ADDR_LEN],
6315     const uint8_t mac[IEEE80211_ADDR_LEN])
6316 {
6317         struct iwm_vap *ivp;
6318         struct ieee80211vap *vap;
6319
6320         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6321                 return NULL;
6322         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6323         vap = &ivp->iv_vap;
6324         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6325         vap->iv_bmissthreshold = 10;            /* override default */
6326         /* Override with driver methods. */
6327         ivp->iv_newstate = vap->iv_newstate;
6328         vap->iv_newstate = iwm_newstate;
6329
6330         ivp->id = IWM_DEFAULT_MACID;
6331         ivp->color = IWM_DEFAULT_COLOR;
6332
6333         ivp->have_wme = FALSE;
6334         ivp->ps_disabled = FALSE;
6335
6336         ieee80211_ratectl_init(vap);
6337         /* Complete setup. */
6338         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6339             mac);
6340         ic->ic_opmode = opmode;
6341
6342         return vap;
6343 }
6344
6345 static void
6346 iwm_vap_delete(struct ieee80211vap *vap)
6347 {
6348         struct iwm_vap *ivp = IWM_VAP(vap);
6349
6350         ieee80211_ratectl_deinit(vap);
6351         ieee80211_vap_detach(vap);
6352         kfree(ivp, M_80211_VAP);
6353 }
6354
6355 static void
6356 iwm_scan_start(struct ieee80211com *ic)
6357 {
6358         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6359         struct iwm_softc *sc = ic->ic_softc;
6360         int error;
6361
6362         IWM_LOCK(sc);
6363         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6364                 /* This should not be possible */
6365                 device_printf(sc->sc_dev,
6366                     "%s: Previous scan not completed yet\n", __func__);
6367         }
6368         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6369                 error = iwm_mvm_umac_scan(sc);
6370         else
6371                 error = iwm_mvm_lmac_scan(sc);
6372         if (error != 0) {
6373                 device_printf(sc->sc_dev, "could not initiate scan\n");
6374                 IWM_UNLOCK(sc);
6375                 ieee80211_cancel_scan(vap);
6376         } else {
6377                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6378                 iwm_led_blink_start(sc);
6379                 IWM_UNLOCK(sc);
6380         }
6381 }
6382
6383 static void
6384 iwm_scan_end(struct ieee80211com *ic)
6385 {
6386         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6387         struct iwm_softc *sc = ic->ic_softc;
6388
6389         IWM_LOCK(sc);
6390         iwm_led_blink_stop(sc);
6391         if (vap->iv_state == IEEE80211_S_RUN)
6392                 iwm_mvm_led_enable(sc);
6393         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6394                 /*
6395                  * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6396                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6397                  * taskqueue.
6398                  */
6399                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6400                 iwm_mvm_scan_stop_wait(sc);
6401         }
6402         IWM_UNLOCK(sc);
6403
6404         /*
6405          * Make sure we don't race, if sc_es_task is still enqueued here.
6406          * This is to make sure that it won't call ieee80211_scan_done
6407          * when we have already started the next scan.
6408          */
6409         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6410 }
6411
/* Required net80211 callback; intentionally a no-op in this driver. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6416
/* Required net80211 callback; intentionally a no-op in this driver. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6421
/* Required net80211 scan callback; intentionally a no-op in this driver. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6426
/* Required net80211 scan callback; intentionally a no-op in this driver. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6432
6433 void
6434 iwm_init_task(void *arg1)
6435 {
6436         struct iwm_softc *sc = arg1;
6437
6438         IWM_LOCK(sc);
6439         while (sc->sc_flags & IWM_FLAG_BUSY) {
6440 #if defined(__DragonFly__)
6441                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6442 #else
6443                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6444 #endif
6445 }
6446         sc->sc_flags |= IWM_FLAG_BUSY;
6447         iwm_stop(sc);
6448         if (sc->sc_ic.ic_nrunning > 0)
6449                 iwm_init(sc);
6450         sc->sc_flags &= ~IWM_FLAG_BUSY;
6451         wakeup(&sc->sc_flags);
6452         IWM_UNLOCK(sc);
6453 }
6454
6455 static int
6456 iwm_resume(device_t dev)
6457 {
6458         struct iwm_softc *sc = device_get_softc(dev);
6459         int do_reinit = 0;
6460
6461         /*
6462          * We disable the RETRY_TIMEOUT register (0x41) to keep
6463          * PCI Tx retries from interfering with C3 CPU state.
6464          */
6465         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6466         iwm_init_task(device_get_softc(dev));
6467
6468         IWM_LOCK(sc);
6469         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6470                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6471                 do_reinit = 1;
6472         }
6473         IWM_UNLOCK(sc);
6474
6475         if (do_reinit)
6476                 ieee80211_resume_all(&sc->sc_ic);
6477
6478         return 0;
6479 }
6480
6481 static int
6482 iwm_suspend(device_t dev)
6483 {
6484         int do_stop = 0;
6485         struct iwm_softc *sc = device_get_softc(dev);
6486
6487         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6488
6489         ieee80211_suspend_all(&sc->sc_ic);
6490
6491         if (do_stop) {
6492                 IWM_LOCK(sc);
6493                 iwm_stop(sc);
6494                 sc->sc_flags |= IWM_FLAG_SCANNING;
6495                 IWM_UNLOCK(sc);
6496         }
6497
6498         return (0);
6499 }
6500
/*
 * Common teardown path, used both by device_detach (do_net80211 != 0)
 * and by attach/preinit failure cleanup (do_net80211 == 0, i.e. before
 * net80211 state was attached).  Guarded by sc_attached so it runs at
 * most once.  The teardown order is deliberate: quiesce tasks and
 * callouts, stop the device, detach net80211, then free firmware/DMA
 * resources, and destroy the lock last.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Nothing to do if attach never completed (or already detached). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain any queued transmit mbufs before destroying the lock. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6556
6557 static int
6558 iwm_detach(device_t dev)
6559 {
6560         struct iwm_softc *sc = device_get_softc(dev);
6561
6562         return (iwm_detach_local(sc, 1));
6563 }
6564
/* newbus glue: device methods, driver declaration and module metadata. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
/* Module dependencies: firmware loader, PCI bus and net80211 stack. */
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);