2bd51538827bb62133a8711c39d0d7b92a725418
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150
151 #include <machine/endian.h>
152
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155
156 #include <net/bpf.h>
157
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_pcie_trans.h"
189 #include "if_iwm_led.h"
190 #include "if_iwm_fw.h"
191
/*
 * Channel numbers the NVM layout can describe for devices other than
 * the 8000 family (which uses iwm_nvm_channels_8000 below).  The first
 * IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels, the rest 5 GHz.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM parsing code indexes per-channel data by table position. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
202
/*
 * Channel numbers the 8000-family NVM layout can describe.  Same
 * structure as iwm_nvm_channels, but with a wider 5 GHz range.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* Same constraint as above, for the 8000-family channel count. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
213
#define IWM_NUM_2GHZ_CHANNELS	14	/* leading 2.4 GHz entries in the tables above */
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbps units (e.g. 2 == 1 Mbps, 108 == 54 Mbps) */
	uint8_t plcp;	/* PLCP code used to request this rate from the hardware */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* index of the first CCK (11b) rate */
#define IWM_RIDX_OFDM	4	/* index of the first OFDM (11a/g) rate */
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
243
/* One NVM section as read from the device by iwm_nvm_read_section(). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* section contents; owner frees after parsing */
};
248
/* Timeouts (in ticks) for the firmware "alive" and calibration waits. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of waiting for the ucode "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive response arrived */
	uint32_t scd_base_addr;	/* scd (presumably scheduler) base address
				 * reported by firmware; fed to
				 * iwm_trans_pcie_fw_alive() */
};
256
257 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
258 static int      iwm_firmware_store_section(struct iwm_softc *,
259                                            enum iwm_ucode_type,
260                                            const uint8_t *, size_t);
261 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
262 static void     iwm_fw_info_free(struct iwm_fw_info *);
263 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
264 #if !defined(__DragonFly__)
265 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
266 #endif
267 static int      iwm_alloc_fwmem(struct iwm_softc *);
268 static int      iwm_alloc_sched(struct iwm_softc *);
269 static int      iwm_alloc_kw(struct iwm_softc *);
270 static int      iwm_alloc_ict(struct iwm_softc *);
271 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
275                                   int);
276 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void     iwm_enable_interrupts(struct iwm_softc *);
279 static void     iwm_restore_interrupts(struct iwm_softc *);
280 static void     iwm_disable_interrupts(struct iwm_softc *);
281 static void     iwm_ict_reset(struct iwm_softc *);
282 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
283 static void     iwm_stop_device(struct iwm_softc *);
284 static void     iwm_mvm_nic_config(struct iwm_softc *);
285 static int      iwm_nic_rx_init(struct iwm_softc *);
286 static int      iwm_nic_tx_init(struct iwm_softc *);
287 static int      iwm_nic_init(struct iwm_softc *);
288 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
289 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
290 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
291                                    uint16_t, uint8_t *, uint16_t *);
292 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
293                                      uint16_t *, uint32_t);
294 static uint32_t iwm_eeprom_channel_flags(uint16_t);
295 static void     iwm_add_channel_band(struct iwm_softc *,
296                     struct ieee80211_channel[], int, int *, int, size_t,
297                     const uint8_t[]);
298 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
299                     struct ieee80211_channel[]);
300 static struct iwm_nvm_data *
301         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302                            const uint16_t *, const uint16_t *,
303                            const uint16_t *, const uint16_t *,
304                            const uint16_t *);
305 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
306 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
307                                                struct iwm_nvm_data *,
308                                                const uint16_t *,
309                                                const uint16_t *);
310 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
311                             const uint16_t *);
312 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
313 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
314                                   const uint16_t *);
315 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
316                                    const uint16_t *);
317 static void     iwm_set_radio_cfg(const struct iwm_softc *,
318                                   struct iwm_nvm_data *, uint32_t);
319 static struct iwm_nvm_data *
320         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
321 static int      iwm_nvm_init(struct iwm_softc *);
322 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
323                                       const struct iwm_fw_desc *);
324 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
325                                              bus_addr_t, uint32_t);
326 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
327                                                 const struct iwm_fw_sects *,
328                                                 int, int *);
329 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
330                                            const struct iwm_fw_sects *,
331                                            int, int *);
332 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
333                                                const struct iwm_fw_sects *);
334 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
335                                           const struct iwm_fw_sects *);
336 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
337 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
338 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
339 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
340                                               enum iwm_ucode_type);
341 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
342 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
343 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
344 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
345                                             struct iwm_rx_phy_info *);
346 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347                                       struct iwm_rx_packet *,
348                                       struct iwm_rx_data *);
349 static int      iwm_get_noise(struct iwm_softc *sc,
350                     const struct iwm_mvm_statistics_rx_non_phy *);
351 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
352                                    struct iwm_rx_data *);
353 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
354                                          struct iwm_rx_packet *,
355                                          struct iwm_node *);
356 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
357                                   struct iwm_rx_data *);
358 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
359 #if 0
360 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
361                                  uint16_t);
362 #endif
363 static const struct iwm_rate *
364         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
365                         struct ieee80211_frame *, struct iwm_tx_cmd *);
366 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
367                        struct ieee80211_node *, int);
368 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
369                              const struct ieee80211_bpf_params *);
370 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
371                                       uint32_t tfd_msk, uint32_t flags);
372 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
373                                                 struct iwm_mvm_add_sta_cmd_v7 *,
374                                                 int *);
375 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
376                                        int);
377 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
378 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
379 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
380                                            struct iwm_int_sta *,
381                                            const uint8_t *, uint16_t, uint16_t);
382 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
383 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
384 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
385 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
386 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
387 static struct ieee80211_node *
388                 iwm_node_alloc(struct ieee80211vap *,
389                                const uint8_t[IEEE80211_ADDR_LEN]);
390 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
391 static int      iwm_media_change(struct ifnet *);
392 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
393 static void     iwm_endscan_cb(void *, int);
394 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
395                                         struct iwm_sf_cfg_cmd *,
396                                         struct ieee80211_node *);
397 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
398 static int      iwm_send_bt_init_conf(struct iwm_softc *);
399 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
400 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
401 static int      iwm_init_hw(struct iwm_softc *);
402 static void     iwm_init(struct iwm_softc *);
403 static void     iwm_start(struct iwm_softc *);
404 static void     iwm_stop(struct iwm_softc *);
405 static void     iwm_watchdog(void *);
406 static void     iwm_parent(struct ieee80211com *);
407 #ifdef IWM_DEBUG
408 static const char *
409                 iwm_desc_lookup(uint32_t);
410 static void     iwm_nic_error(struct iwm_softc *);
411 static void     iwm_nic_umac_error(struct iwm_softc *);
412 #endif
413 static void     iwm_notif_intr(struct iwm_softc *);
414 static void     iwm_intr(void *);
415 static int      iwm_attach(device_t);
416 static int      iwm_is_valid_ether_addr(uint8_t *);
417 static void     iwm_preinit(void *);
418 static int      iwm_detach_local(struct iwm_softc *sc, int);
419 static void     iwm_init_task(void *);
420 static void     iwm_radiotap_attach(struct iwm_softc *);
421 static struct ieee80211vap *
422                 iwm_vap_create(struct ieee80211com *,
423                                const char [IFNAMSIZ], int,
424                                enum ieee80211_opmode, int,
425                                const uint8_t [IEEE80211_ADDR_LEN],
426                                const uint8_t [IEEE80211_ADDR_LEN]);
427 static void     iwm_vap_delete(struct ieee80211vap *);
428 static void     iwm_scan_start(struct ieee80211com *);
429 static void     iwm_scan_end(struct ieee80211com *);
430 static void     iwm_update_mcast(struct ieee80211com *);
431 static void     iwm_set_channel(struct ieee80211com *);
432 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
433 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
434 static int      iwm_detach(device_t);
435
#if defined(__DragonFly__)
/* Allow MSI use to be disabled from loader.conf; enabled by default. */
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif
442
443 /*
444  * Firmware parser.
445  */
446
447 static int
448 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
449 {
450         const struct iwm_fw_cscheme_list *l = (const void *)data;
451
452         if (dlen < sizeof(*l) ||
453             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
454                 return EINVAL;
455
456         /* we don't actually store anything for now, always use s/w crypto */
457
458         return 0;
459 }
460
461 static int
462 iwm_firmware_store_section(struct iwm_softc *sc,
463     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
464 {
465         struct iwm_fw_sects *fws;
466         struct iwm_fw_desc *fwone;
467
468         if (type >= IWM_UCODE_TYPE_MAX)
469                 return EINVAL;
470         if (dlen < sizeof(uint32_t))
471                 return EINVAL;
472
473         fws = &sc->sc_fw.fw_sects[type];
474         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
475                 return EINVAL;
476
477         fwone = &fws->fw_sect[fws->fw_count];
478
479         /* first 32bit are device load offset */
480         memcpy(&fwone->offset, data, sizeof(uint32_t));
481
482         /* rest is data */
483         fwone->data = data + sizeof(uint32_t);
484         fwone->len = dlen - sizeof(uint32_t);
485
486         fws->fw_count++;
487
488         return 0;
489 }
490
/* Scan-channel capability assumed until a firmware TLV overrides it. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* On-the-wire layout of an IWM_UCODE_TLV_DEF_CALIB section payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib; /* default calibration triggers */
} __packed;
497
498 static int
499 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
500 {
501         const struct iwm_tlv_calib_data *def_calib = data;
502         uint32_t ucode_type = le32toh(def_calib->ucode_type);
503
504         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
505                 device_printf(sc->sc_dev,
506                     "Wrong ucode_type %u for default "
507                     "calibration.\n", ucode_type);
508                 return EINVAL;
509         }
510
511         sc->sc_default_calib[ucode_type].flow_trigger =
512             def_calib->calib.flow_trigger;
513         sc->sc_default_calib[ucode_type].event_trigger =
514             def_calib->calib.event_trigger;
515
516         return 0;
517 }
518
/*
 * Release a previously loaded firmware image and forget its parsed
 * sections.  fw->fw_status is deliberately left untouched so that
 * code waiting on the load state (see iwm_read_firmware()) observes
 * a consistent value.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
527
528 static int
529 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
530 {
531         struct iwm_fw_info *fw = &sc->sc_fw;
532         const struct iwm_tlv_ucode_header *uhdr;
533         struct iwm_ucode_tlv tlv;
534         enum iwm_ucode_tlv_type tlv_type;
535         const struct firmware *fwp;
536         const uint8_t *data;
537         uint32_t usniffer_img;
538         uint32_t paging_mem_size;
539         int num_of_cpus;
540         int error = 0;
541         size_t len;
542
543         if (fw->fw_status == IWM_FW_STATUS_DONE &&
544             ucode_type != IWM_UCODE_INIT)
545                 return 0;
546
547         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
548 #if defined(__DragonFly__)
549                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
550 #else
551                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
552 #endif
553         }
554         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
555
556         if (fw->fw_fp != NULL)
557                 iwm_fw_info_free(fw);
558
559         /*
560          * Load firmware into driver memory.
561          * fw_fp will be set.
562          */
563         IWM_UNLOCK(sc);
564         fwp = firmware_get(sc->cfg->fw_name);
565         IWM_LOCK(sc);
566         if (fwp == NULL) {
567                 device_printf(sc->sc_dev,
568                     "could not read firmware %s (error %d)\n",
569                     sc->cfg->fw_name, error);
570                 goto out;
571         }
572         fw->fw_fp = fwp;
573
574         /* (Re-)Initialize default values. */
575         sc->sc_capaflags = 0;
576         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
577         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
578         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
579
580         /*
581          * Parse firmware contents
582          */
583
584         uhdr = (const void *)fw->fw_fp->data;
585         if (*(const uint32_t *)fw->fw_fp->data != 0
586             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
587                 device_printf(sc->sc_dev, "invalid firmware %s\n",
588                     sc->cfg->fw_name);
589                 error = EINVAL;
590                 goto out;
591         }
592
593         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
594             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
595             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
596             IWM_UCODE_API(le32toh(uhdr->ver)));
597         data = uhdr->data;
598         len = fw->fw_fp->datasize - sizeof(*uhdr);
599
600         while (len >= sizeof(tlv)) {
601                 size_t tlv_len;
602                 const void *tlv_data;
603
604                 memcpy(&tlv, data, sizeof(tlv));
605                 tlv_len = le32toh(tlv.length);
606                 tlv_type = le32toh(tlv.type);
607
608                 len -= sizeof(tlv);
609                 data += sizeof(tlv);
610                 tlv_data = data;
611
612                 if (len < tlv_len) {
613                         device_printf(sc->sc_dev,
614                             "firmware too short: %zu bytes\n",
615                             len);
616                         error = EINVAL;
617                         goto parse_out;
618                 }
619
620                 switch ((int)tlv_type) {
621                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
622                         if (tlv_len < sizeof(uint32_t)) {
623                                 device_printf(sc->sc_dev,
624                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
625                                     __func__,
626                                     (int) tlv_len);
627                                 error = EINVAL;
628                                 goto parse_out;
629                         }
630                         sc->sc_capa_max_probe_len
631                             = le32toh(*(const uint32_t *)tlv_data);
632                         /* limit it to something sensible */
633                         if (sc->sc_capa_max_probe_len >
634                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
635                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
636                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
637                                     "ridiculous\n", __func__);
638                                 error = EINVAL;
639                                 goto parse_out;
640                         }
641                         break;
642                 case IWM_UCODE_TLV_PAN:
643                         if (tlv_len) {
644                                 device_printf(sc->sc_dev,
645                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
646                                     __func__,
647                                     (int) tlv_len);
648                                 error = EINVAL;
649                                 goto parse_out;
650                         }
651                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
652                         break;
653                 case IWM_UCODE_TLV_FLAGS:
654                         if (tlv_len < sizeof(uint32_t)) {
655                                 device_printf(sc->sc_dev,
656                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
657                                     __func__,
658                                     (int) tlv_len);
659                                 error = EINVAL;
660                                 goto parse_out;
661                         }
662                         /*
663                          * Apparently there can be many flags, but Linux driver
664                          * parses only the first one, and so do we.
665                          *
666                          * XXX: why does this override IWM_UCODE_TLV_PAN?
667                          * Intentional or a bug?  Observations from
668                          * current firmware file:
669                          *  1) TLV_PAN is parsed first
670                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
671                          * ==> this resets TLV_PAN to itself... hnnnk
672                          */
673                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
674                         break;
675                 case IWM_UCODE_TLV_CSCHEME:
676                         if ((error = iwm_store_cscheme(sc,
677                             tlv_data, tlv_len)) != 0) {
678                                 device_printf(sc->sc_dev,
679                                     "%s: iwm_store_cscheme(): returned %d\n",
680                                     __func__,
681                                     error);
682                                 goto parse_out;
683                         }
684                         break;
685                 case IWM_UCODE_TLV_NUM_OF_CPU:
686                         if (tlv_len != sizeof(uint32_t)) {
687                                 device_printf(sc->sc_dev,
688                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
689                                     __func__,
690                                     (int) tlv_len);
691                                 error = EINVAL;
692                                 goto parse_out;
693                         }
694                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
695                         if (num_of_cpus == 2) {
696                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
697                                         TRUE;
698                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
699                                         TRUE;
700                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
701                                         TRUE;
702                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
703                                 device_printf(sc->sc_dev,
704                                     "%s: Driver supports only 1 or 2 CPUs\n",
705                                     __func__);
706                                 error = EINVAL;
707                                 goto parse_out;
708                         }
709                         break;
710                 case IWM_UCODE_TLV_SEC_RT:
711                         if ((error = iwm_firmware_store_section(sc,
712                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
713                                 device_printf(sc->sc_dev,
714                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
715                                     __func__,
716                                     error);
717                                 goto parse_out;
718                         }
719                         break;
720                 case IWM_UCODE_TLV_SEC_INIT:
721                         if ((error = iwm_firmware_store_section(sc,
722                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
723                                 device_printf(sc->sc_dev,
724                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
725                                     __func__,
726                                     error);
727                                 goto parse_out;
728                         }
729                         break;
730                 case IWM_UCODE_TLV_SEC_WOWLAN:
731                         if ((error = iwm_firmware_store_section(sc,
732                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
733                                 device_printf(sc->sc_dev,
734                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
735                                     __func__,
736                                     error);
737                                 goto parse_out;
738                         }
739                         break;
740                 case IWM_UCODE_TLV_DEF_CALIB:
741                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
742                                 device_printf(sc->sc_dev,
743                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
744                                     __func__,
745                                     (int) tlv_len,
746                                     (int) sizeof(struct iwm_tlv_calib_data));
747                                 error = EINVAL;
748                                 goto parse_out;
749                         }
750                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
751                                 device_printf(sc->sc_dev,
752                                     "%s: iwm_set_default_calib() failed: %d\n",
753                                     __func__,
754                                     error);
755                                 goto parse_out;
756                         }
757                         break;
758                 case IWM_UCODE_TLV_PHY_SKU:
759                         if (tlv_len != sizeof(uint32_t)) {
760                                 error = EINVAL;
761                                 device_printf(sc->sc_dev,
762                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
763                                     __func__,
764                                     (int) tlv_len);
765                                 goto parse_out;
766                         }
767                         sc->sc_fw.phy_config =
768                             le32toh(*(const uint32_t *)tlv_data);
769                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
770                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
771                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
772                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
773                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
774                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
775                         break;
776
777                 case IWM_UCODE_TLV_API_CHANGES_SET: {
778                         const struct iwm_ucode_api *api;
779                         if (tlv_len != sizeof(*api)) {
780                                 error = EINVAL;
781                                 goto parse_out;
782                         }
783                         api = (const struct iwm_ucode_api *)tlv_data;
784                         /* Flags may exceed 32 bits in future firmware. */
785                         if (le32toh(api->api_index) > 0) {
786                                 device_printf(sc->sc_dev,
787                                     "unsupported API index %d\n",
788                                     le32toh(api->api_index));
789                                 goto parse_out;
790                         }
791                         sc->sc_ucode_api = le32toh(api->api_flags);
792                         break;
793                 }
794
795                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
796                         const struct iwm_ucode_capa *capa;
797                         int idx, i;
798                         if (tlv_len != sizeof(*capa)) {
799                                 error = EINVAL;
800                                 goto parse_out;
801                         }
802                         capa = (const struct iwm_ucode_capa *)tlv_data;
803                         idx = le32toh(capa->api_index);
804                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
805                                 device_printf(sc->sc_dev,
806                                     "unsupported API index %d\n", idx);
807                                 goto parse_out;
808                         }
809                         for (i = 0; i < 32; i++) {
810                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
811                                         continue;
812                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
813                         }
814                         break;
815                 }
816
817                 case 48: /* undocumented TLV */
818                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
819                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
820                         /* ignore, not used by current driver */
821                         break;
822
823                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
824                         if ((error = iwm_firmware_store_section(sc,
825                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
826                             tlv_len)) != 0)
827                                 goto parse_out;
828                         break;
829
830                 case IWM_UCODE_TLV_PAGING:
831                         if (tlv_len != sizeof(uint32_t)) {
832                                 error = EINVAL;
833                                 goto parse_out;
834                         }
835                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
836
837                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
838                             "%s: Paging: paging enabled (size = %u bytes)\n",
839                             __func__, paging_mem_size);
840                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
841                                 device_printf(sc->sc_dev,
842                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
843                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
844                                 error = EINVAL;
845                                 goto out;
846                         }
847                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
848                                 device_printf(sc->sc_dev,
849                                     "%s: Paging: image isn't multiple %u\n",
850                                     __func__, IWM_FW_PAGING_SIZE);
851                                 error = EINVAL;
852                                 goto out;
853                         }
854
855                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
856                             paging_mem_size;
857                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
858                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
859                             paging_mem_size;
860                         break;
861
862                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
863                         if (tlv_len != sizeof(uint32_t)) {
864                                 error = EINVAL;
865                                 goto parse_out;
866                         }
867                         sc->sc_capa_n_scan_channels =
868                           le32toh(*(const uint32_t *)tlv_data);
869                         break;
870
871                 case IWM_UCODE_TLV_FW_VERSION:
872                         if (tlv_len != sizeof(uint32_t) * 3) {
873                                 error = EINVAL;
874                                 goto parse_out;
875                         }
876                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
877                             "%d.%d.%d",
878                             le32toh(((const uint32_t *)tlv_data)[0]),
879                             le32toh(((const uint32_t *)tlv_data)[1]),
880                             le32toh(((const uint32_t *)tlv_data)[2]));
881                         break;
882
883                 case IWM_UCODE_TLV_FW_MEM_SEG:
884                         break;
885
886                 default:
887                         device_printf(sc->sc_dev,
888                             "%s: unknown firmware section %d, abort\n",
889                             __func__, tlv_type);
890                         error = EINVAL;
891                         goto parse_out;
892                 }
893
894                 len -= roundup(tlv_len, 4);
895                 data += roundup(tlv_len, 4);
896         }
897
898         KASSERT(error == 0, ("unhandled error"));
899
900  parse_out:
901         if (error) {
902                 device_printf(sc->sc_dev, "firmware parse error %d, "
903                     "section type %d\n", error, tlv_type);
904         }
905
906         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
907                 device_printf(sc->sc_dev,
908                     "device uses unsupported power ops\n");
909                 error = ENOTSUP;
910         }
911
912  out:
913         if (error) {
914                 fw->fw_status = IWM_FW_STATUS_NONE;
915                 if (fw->fw_fp != NULL)
916                         iwm_fw_info_free(fw);
917         } else
918                 fw->fw_status = IWM_FW_STATUS_DONE;
919         wakeup(&sc->sc_fw);
920
921         return error;
922 }
923
924 /*
925  * DMA resource routines
926  */
927
928 /* fwmem is used to load firmware onto the card */
929 static int
930 iwm_alloc_fwmem(struct iwm_softc *sc)
931 {
932         /* Must be aligned on a 16-byte boundary. */
933         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
934             IWM_FH_MEM_TB_MAX_LENGTH, 16);
935 }
936
937 /* tx scheduler rings.  not used? */
938 static int
939 iwm_alloc_sched(struct iwm_softc *sc)
940 {
941         /* TX scheduler rings must be aligned on a 1KB boundary. */
942         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
943             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
944 }
945
946 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
947 static int
948 iwm_alloc_kw(struct iwm_softc *sc)
949 {
950         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
951 }
952
953 /* interrupt cause table */
954 static int
955 iwm_alloc_ict(struct iwm_softc *sc)
956 {
957         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
958             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
959 }
960
/*
 * Allocate everything the RX ring needs: the descriptor ring, the
 * status area, the buffer DMA tag, a spare DMA map, and one mapped
 * receive mbuf per slot.  On any failure the partially constructed
 * ring is torn down via iwm_free_rx_ring() and the bus_dma/alloc
 * error is returned; returns 0 on success.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is a 32-bit word per slot (legacy RX layout). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	/*
	 * NOTE(review): the DragonFly variant uses PAGE_SIZE alignment and
	 * BUS_DMA_NOWAIT where the other branch uses byte alignment and no
	 * flags -- presumably deliberate for this port; confirm against
	 * bus_dma(9).
	 */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach and map a fresh receive mbuf into slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1041
1042 static void
1043 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1044 {
1045         /* Reset the ring state */
1046         ring->cur = 0;
1047
1048         /*
1049          * The hw rx ring index in shared memory must also be cleared,
1050          * otherwise the discrepancy can cause reprocessing chaos.
1051          */
1052         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1053 }
1054
1055 static void
1056 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1057 {
1058         int i;
1059
1060         iwm_dma_contig_free(&ring->desc_dma);
1061         iwm_dma_contig_free(&ring->stat_dma);
1062
1063         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1064                 struct iwm_rx_data *data = &ring->data[i];
1065
1066                 if (data->m != NULL) {
1067                         bus_dmamap_sync(ring->data_dmat, data->map,
1068                             BUS_DMASYNC_POSTREAD);
1069                         bus_dmamap_unload(ring->data_dmat, data->map);
1070                         m_freem(data->m);
1071                         data->m = NULL;
1072                 }
1073                 if (data->map != NULL) {
1074                         bus_dmamap_destroy(ring->data_dmat, data->map);
1075                         data->map = NULL;
1076                 }
1077         }
1078         if (ring->spare_map != NULL) {
1079                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1080                 ring->spare_map = NULL;
1081         }
1082         if (ring->data_dmat != NULL) {
1083                 bus_dma_tag_destroy(ring->data_dmat);
1084                 ring->data_dmat = NULL;
1085         }
1086 }
1087
/*
 * Allocate a TX ring: descriptors for every queue, plus a command
 * buffer area, buffer DMA tag and per-slot DMA maps for the queues
 * the driver actually uses (qid <= IWM_MVM_CMD_QUEUE).  Failures
 * unwind through iwm_free_tx_ring(); returns 0 on success.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Walk the contiguous cmd buffer and record, for each slot, the
	 * bus address of its command header and of the scratch field
	 * inside the TX command, then create the slot's buffer DMA map.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk must end exactly at the end of the cmd buffer. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1177
1178 static void
1179 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1180 {
1181         int i;
1182
1183         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1184                 struct iwm_tx_data *data = &ring->data[i];
1185
1186                 if (data->m != NULL) {
1187                         bus_dmamap_sync(ring->data_dmat, data->map,
1188                             BUS_DMASYNC_POSTWRITE);
1189                         bus_dmamap_unload(ring->data_dmat, data->map);
1190                         m_freem(data->m);
1191                         data->m = NULL;
1192                 }
1193         }
1194         /* Clear TX descriptors. */
1195         memset(ring->desc, 0, ring->desc_dma.size);
1196         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1197             BUS_DMASYNC_PREWRITE);
1198         sc->qfullmsk &= ~(1 << ring->qid);
1199         ring->queued = 0;
1200         ring->cur = 0;
1201
1202         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1203                 iwm_pcie_clear_cmd_in_flight(sc);
1204 }
1205
1206 static void
1207 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1208 {
1209         int i;
1210
1211         iwm_dma_contig_free(&ring->desc_dma);
1212         iwm_dma_contig_free(&ring->cmd_dma);
1213
1214         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1215                 struct iwm_tx_data *data = &ring->data[i];
1216
1217                 if (data->m != NULL) {
1218                         bus_dmamap_sync(ring->data_dmat, data->map,
1219                             BUS_DMASYNC_POSTWRITE);
1220                         bus_dmamap_unload(ring->data_dmat, data->map);
1221                         m_freem(data->m);
1222                         data->m = NULL;
1223                 }
1224                 if (data->map != NULL) {
1225                         bus_dmamap_destroy(ring->data_dmat, data->map);
1226                         data->map = NULL;
1227                 }
1228         }
1229         if (ring->data_dmat != NULL) {
1230                 bus_dma_tag_destroy(ring->data_dmat);
1231                 ring->data_dmat = NULL;
1232         }
1233 }
1234
1235 /*
1236  * High-level hardware frobbing routines
1237  */
1238
/*
 * Enable the standard interrupt set and remember the mask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1245
/* Re-apply the interrupt mask last saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1251
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts (both the CSR and FH status sets) */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1262
/*
 * Clear the in-memory interrupt cause table (ICT), point the device
 * at it, and switch both device and driver into ICT interrupt mode.
 * Interrupts are disabled for the duration and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	/* Ack everything pending first so stale causes don't fire. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1286
1287 /*
1288  * Since this .. hard-resets things, it's time to actually
1289  * mark the first vap (if any) as having no mac context.
1290  * It's annoying, but since the driver is potentially being
1291  * stop/start'ed whilst active (thanks openbsd port!) we
1292  * have to correctly track this.
1293  */
/*
 * Full device shutdown: quiesce interrupts and DMA, drain the rings,
 * power down the busmaster clocks (7000 family), stop the APM and
 * soft-reset the on-board processor.  RF-kill interrupts are left
 * armed so rfkill state changes are still noticed afterwards.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1382
1383 static void
1384 iwm_mvm_nic_config(struct iwm_softc *sc)
1385 {
1386         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1387         uint32_t reg_val = 0;
1388         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1389
1390         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1391             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1392         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1393             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1394         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1395             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1396
1397         /* SKU control */
1398         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1399             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1400         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1401             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1402
1403         /* radio configuration */
1404         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1405         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1406         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1407
1408         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1409
1410         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1411             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1412             radio_cfg_step, radio_cfg_dash);
1413
1414         /*
1415          * W/A : NIC is stuck in a reset state after Early PCIe power off
1416          * (PCIe power is lost before PERST# is asserted), causing ME FW
1417          * to lose ownership and not being able to obtain it back.
1418          */
1419         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1420                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1421                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1422                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1423         }
1424 }
1425
/*
 * Program the RX DMA engine: clear the status area, reset the channel
 * pointers, point the hardware at the descriptor ring and status area,
 * and enable the channel with 4KB buffers.  Returns EBUSY if the NIC
 * lock cannot be acquired, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1487
1488 static int
1489 iwm_nic_tx_init(struct iwm_softc *sc)
1490 {
1491         int qid;
1492
1493         if (!iwm_nic_lock(sc))
1494                 return EBUSY;
1495
1496         /* Deactivate TX scheduler. */
1497         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1498
1499         /* Set physical address of "keep warm" page (16-byte aligned). */
1500         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1501
1502         /* Initialize TX rings. */
1503         for (qid = 0; qid < nitems(sc->txq); qid++) {
1504                 struct iwm_tx_ring *txq = &sc->txq[qid];
1505
1506                 /* Set physical address of TX ring (256-byte aligned). */
1507                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1508                     txq->desc_dma.paddr >> 8);
1509                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1510                     "%s: loading ring %d descriptors (%p) at %lx\n",
1511                     __func__,
1512                     qid, txq->desc,
1513                     (unsigned long) (txq->desc_dma.paddr >> 8));
1514         }
1515
1516         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1517
1518         iwm_nic_unlock(sc);
1519
1520         return 0;
1521 }
1522
1523 static int
1524 iwm_nic_init(struct iwm_softc *sc)
1525 {
1526         int error;
1527
1528         iwm_apm_init(sc);
1529         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1530                 iwm_set_pwr(sc);
1531
1532         iwm_mvm_nic_config(sc);
1533
1534         if ((error = iwm_nic_rx_init(sc)) != 0)
1535                 return error;
1536
1537         /*
1538          * Ditto for TX, from iwn
1539          */
1540         if ((error = iwm_nic_tx_init(sc)) != 0)
1541                 return error;
1542
1543         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1544             "%s: shadow registers enabled\n", __func__);
1545         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1546
1547         return 0;
1548 }
1549
/*
 * Per-access-category TX FIFO assignment used when enabling queues.
 * NOTE(review): the table is ordered VO, VI, BE, BK while net80211's
 * WME_AC_* indices run BE, BK, VI, VO -- confirm the caller's index
 * convention before relying on the ordering here.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1556
/*
 * Activate TX queue 'qid' and bind it to scheduler FIFO 'fifo'.
 * The command queue is programmed directly through the scheduler's
 * PRPH registers and its SRAM context; any other queue is configured
 * by sending an IWM_SCD_QUEUE_CFG command to the firmware.
 * Returns 0 on success, EBUSY if NIC access is lost, or the errno from
 * the firmware command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Start the queue's write pointer at slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Take the queue out of aggregation mode. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Clear the queue's SRAM context word. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and point it at its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		/* Let the firmware configure a regular data queue. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): iwlwifi's equivalent writes a bitmask of queues
	 * (BIT(queue)) into SCD_EN_CTRL; ORing the raw qid value looks
	 * suspicious for qids that are not a single bit -- verify against
	 * the reference driver before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1644
/*
 * Post-"alive" handshake with the firmware: reset the ICT interrupt
 * table, zero the scheduler's SRAM context/status/translation tables,
 * program the scheduler's DRAM base, enable the command queue and all
 * FH TX DMA channels.  'scd_base_addr' is the SCD base reported in the
 * alive response (0 to skip the cross-check).
 * Returns 0 on success, EBUSY on NIC-access or SRAM-write failure, or
 * the errno from enabling the command queue.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	/* Words covering the SCD context through the translation table. */
	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/*
	 * Read the SCD base from the periphery and warn if it disagrees
	 * with what the firmware claimed in the alive response.
	 */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	/* reset context data, TX status and translation data */
	/* NOTE(review): the actual errno from iwm_write_mem is collapsed
	 * into EBUSY here. */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Re-activate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* error is 0 on every path that reaches here. */
	return error;
}
1715
1716 /*
1717  * NVM read access and content parsing.  We do not support
1718  * external NVM or writing NVM.
1719  * iwlwifi/mvm/nvm.c
1720  */
1721
1722 /* Default NVM size to read */
1723 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1724
1725 #define IWM_NVM_WRITE_OPCODE 1
1726 #define IWM_NVM_READ_OPCODE 0
1727
/*
 * Status codes returned by the firmware in the NVM_ACCESS response
 * (iwm_nvm_access_resp.status) for a chunk read.
 */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1733
/*
 * Read one chunk of NVM section 'section' via the IWM_NVM_ACCESS
 * firmware command.  On success the payload is copied to data + offset
 * and *len is set to the byte count actually returned (possibly less
 * than 'length'; 0 with return value 0 marks a benign end-of-section).
 * Returns 0 on success or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	/* WANT_SKB keeps the response buffer; SEND_IN_RFKILL lets the
	 * read proceed even while RF-kill is asserted. */
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check that the firmware answered for our offset. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	/* Guard against the firmware overrunning the caller's buffer. */
	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1820
1821 /*
1822  * Reads an NVM section completely.
1823  * NICs prior to 7000 family don't have a real NVM, but just read
1824  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1825  * by uCode, we need to manually check in this case that we don't
1826  * overflow and try to read more than the EEPROM size.
1827  * For 7000 family NICs, we supply the maximal size we can read, and
1828  * the uCode fills the response with as much data as we can,
1829  * without overflowing, so no check is needed.
1830  */
1831 static int
1832 iwm_nvm_read_section(struct iwm_softc *sc,
1833         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1834 {
1835         uint16_t seglen, length, offset = 0;
1836         int ret;
1837
1838         /* Set nvm section read length */
1839         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1840
1841         seglen = length;
1842
1843         /* Read the NVM until exhausted (reading less than requested) */
1844         while (seglen == length) {
1845                 /* Check no memory assumptions fail and cause an overflow */
1846                 if ((size_read + offset + length) >
1847                     sc->cfg->eeprom_size) {
1848                         device_printf(sc->sc_dev,
1849                             "EEPROM size is too small for NVM\n");
1850                         return ENOBUFS;
1851                 }
1852
1853                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1854                 if (ret) {
1855                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1856                                     "Cannot read NVM from section %d offset %d, length %d\n",
1857                                     section, offset, length);
1858                         return ret;
1859                 }
1860                 offset += seglen;
1861         }
1862
1863         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1864                     "NVM section %d read completed\n", section);
1865         *len = offset;
1866         return 0;
1867 }
1868
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,		/* MAC address within the HW section */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,	/* absolute base of the SW section */
	/* The following are word offsets relative to the SW section. */
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1886
/* Family-8000 NVM layout; offsets are in words within each section. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	/* Offset within the MAC-address-override (MAO) section. */
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1912
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),	/* 2.4 GHz band enabled */
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),	/* 5.2 GHz band enabled */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),	/* 802.11n allowed */
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),	/* 802.11ac allowed */
};
1920
/* radio config bits (actual values from NVM definition) */
/*
 * Field extractors for the NVM radio configuration word.  Arguments
 * are fully parenthesized so the macros expand correctly even when
 * passed a compound expression (CERT PRE01-C); all current callers
 * pass a plain variable, so expansion results are unchanged for them.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 uses a wider 32-bit radio configuration layout. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       ((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         (((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         (((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         (((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       (((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       (((x) >> 28) & 0xF)

/* Default max TX power used when the NVM does not provide a limit. */
#define DEFAULT_MAX_TX_POWER 16
1937
1938 /**
1939  * enum iwm_nvm_channel_flags - channel flags in NVM
1940  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1941  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1942  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1943  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1944  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1945  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1946  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1947  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1948  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1949  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1950  */
1951 enum iwm_nvm_channel_flags {
1952         IWM_NVM_CHANNEL_VALID = (1 << 0),
1953         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1954         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1955         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1956         IWM_NVM_CHANNEL_DFS = (1 << 7),
1957         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1958         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1959         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1960         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1961 };
1962
1963 /*
1964  * Translate EEPROM flags to net80211.
1965  */
1966 static uint32_t
1967 iwm_eeprom_channel_flags(uint16_t ch_flags)
1968 {
1969         uint32_t nflags;
1970
1971         nflags = 0;
1972         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1973                 nflags |= IEEE80211_CHAN_PASSIVE;
1974         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1975                 nflags |= IEEE80211_CHAN_NOADHOC;
1976         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1977                 nflags |= IEEE80211_CHAN_DFS;
1978                 /* Just in case. */
1979                 nflags |= IEEE80211_CHAN_NOADHOC;
1980         }
1981
1982         return (nflags);
1983 }
1984
/*
 * Add NVM channel-table entries [ch_idx, ch_num) to the net80211
 * channel list 'chans', skipping channels the NVM marks invalid.
 * 'bands' is the net80211 mode bitmap each channel is registered
 * under; *nchans is advanced for every channel added.  Stops early if
 * ieee80211_add_channel() fails (e.g. list full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The channel-number table differs per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2025
/*
 * net80211 callback: populate the device channel list from the parsed
 * NVM data.  2 GHz channels 1-13 are added as 11b/11g, channel 14 as
 * 11b only, and the 5 GHz channels as 11a when the SKU enables that
 * band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* Table length differs per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2058
/*
 * Derive the MAC address for family-8000 devices: prefer the MAC
 * address override (MAO) NVM section, fall back to the WFMP periphery
 * registers, and zero the address if neither source is usable.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Sentinel value Intel puts in unprogrammed MAO sections. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The register words hold the address bytes reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2117
2118 static int
2119 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2120             const uint16_t *phy_sku)
2121 {
2122         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2123                 return le16_to_cpup(nvm_sw + IWM_SKU);
2124
2125         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2126 }
2127
2128 static int
2129 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2130 {
2131         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2132                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2133         else
2134                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2135                                                 IWM_NVM_VERSION_8000));
2136 }
2137
2138 static int
2139 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2140                   const uint16_t *phy_sku)
2141 {
2142         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2143                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2144
2145         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2146 }
2147
2148 static int
2149 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2150 {
2151         int n_hw_addr;
2152
2153         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2154                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2155
2156         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2157
2158         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2159 }
2160
2161 static void
2162 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2163                   uint32_t radio_cfg)
2164 {
2165         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2166                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2167                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2168                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2169                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2170                 return;
2171         }
2172
2173         /* set the radio configuration for family 8000 */
2174         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2175         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2176         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2177         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2178         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2179         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2180 }
2181
/*
 * Fill data->hw_addr with the device MAC address: pre-8000 parts store
 * it in the NVM HW section as little-endian 16-bit words (byte order
 * 214365); family 8000 uses the MAO/WFMP fallback chain.
 * Returns 0 on success or EINVAL when no valid address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2212
/*
 * Assemble a freshly allocated iwm_nvm_data from the raw NVM sections.
 * The caller owns the returned buffer and frees it with
 * iwm_free_nvm_data().  Returns NULL if no valid MAC address could be
 * derived from the sections.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;

	/*
	 * The channel-flags table is appended to the structure; its
	 * length depends on the device family.
	 */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	}
	/* M_WAITOK allocations do not return NULL; check is vestigial. */
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is forced off here regardless of the SKU capability bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		kfree(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2262
2263 static void
2264 iwm_free_nvm_data(struct iwm_nvm_data *data)
2265 {
2266         if (data != NULL)
2267                 kfree(data, M_DEVBUF);
2268 }
2269
2270 static struct iwm_nvm_data *
2271 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2272 {
2273         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2274
2275         /* Checking for required sections */
2276         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2277                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2278                     !sections[sc->cfg->nvm_hw_section_num].data) {
2279                         device_printf(sc->sc_dev,
2280                             "Can't parse empty OTP/NVM sections\n");
2281                         return NULL;
2282                 }
2283         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2284                 /* SW and REGULATORY sections are mandatory */
2285                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2286                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2287                         device_printf(sc->sc_dev,
2288                             "Can't parse empty OTP/NVM sections\n");
2289                         return NULL;
2290                 }
2291                 /* MAC_OVERRIDE or at least HW section must exist */
2292                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2293                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2294                         device_printf(sc->sc_dev,
2295                             "Can't parse mac_address, empty sections\n");
2296                         return NULL;
2297                 }
2298
2299                 /* PHY_SKU section is mandatory in B0 */
2300                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2301                         device_printf(sc->sc_dev,
2302                             "Can't parse phy_sku in B0, empty sections\n");
2303                         return NULL;
2304                 }
2305         } else {
2306                 panic("unknown device family %d\n", sc->cfg->device_family);
2307         }
2308
2309         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2310         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2311         calib = (const uint16_t *)
2312             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2313         regulatory = (const uint16_t *)
2314             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2315         mac_override = (const uint16_t *)
2316             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2317         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2318
2319         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2320             phy_sku, regulatory);
2321 }
2322
2323 static int
2324 iwm_nvm_init(struct iwm_softc *sc)
2325 {
2326         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2327         int i, ret, section;
2328         uint32_t size_read = 0;
2329         uint8_t *nvm_buffer, *temp;
2330         uint16_t len;
2331
2332         memset(nvm_sections, 0, sizeof(nvm_sections));
2333
2334         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2335                 return EINVAL;
2336
2337         /* load NVM values from nic */
2338         /* Read From FW NVM */
2339         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2340
2341         nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2342             M_INTWAIT | M_ZERO);
2343         if (!nvm_buffer)
2344                 return ENOMEM;
2345         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2346                 /* we override the constness for initial read */
2347                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2348                                            &len, size_read);
2349                 if (ret)
2350                         continue;
2351                 size_read += len;
2352                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2353                 if (!temp) {
2354                         ret = ENOMEM;
2355                         break;
2356                 }
2357                 memcpy(temp, nvm_buffer, len);
2358
2359                 nvm_sections[section].data = temp;
2360                 nvm_sections[section].length = len;
2361         }
2362         if (!size_read)
2363                 device_printf(sc->sc_dev, "OTP is blank\n");
2364         kfree(nvm_buffer, M_DEVBUF);
2365
2366         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2367         if (!sc->nvm_data)
2368                 return EINVAL;
2369         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2370                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2371
2372         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2373                 if (nvm_sections[i].data != NULL)
2374                         kfree(nvm_sections[i].data, M_DEVBUF);
2375         }
2376
2377         return 0;
2378 }
2379
/*
 * DMA one firmware section into device SRAM, at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes per transfer, bouncing each chunk
 * through the pre-allocated sc->fw_dma buffer.
 *
 * Returns 0 on success or the errno from iwm_pcie_load_firmware_chunk().
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		/* Destinations in the extended SRAM window need a PRPH bit. */
		int extended_addr = FALSE;

		/* The last chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer and sync for DMA. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Clear the window bit again even if the chunk failed. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2432
2433 /*
2434  * ucode
2435  */
/*
 * Program the FH service channel to DMA one firmware chunk from host
 * memory (phy_addr) into device SRAM (dst_addr), then sleep until the
 * interrupt path sets sc->sc_fw_chunk_done.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if the chunk does not complete within a 5 second sleep.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Cleared here; set by the interrupt handler on DMA completion. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the service channel before (re)programming it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address inside device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low 32 bits of the host DMA address ... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ... high address bits, combined with the transfer byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* One TB, one TFD, mark the descriptor valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Un-pause: this kicks off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_CHNL_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2493
/*
 * Load the firmware sections for one CPU of an 8000-family (secured)
 * image, acknowledging each loaded section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register.  *first_ucode_section is updated
 * to the index where this CPU's sections ended, so the next call (for
 * CPU2) can resume from there.
 *
 * Returns 0 on success or the errno from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU1 status bits occupy the low half-word, CPU2 the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Each loaded section adds one more status bit. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	/* Remember where this CPU's sections ended for the caller. */
	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	if (iwm_nic_lock(sc)) {
		/* Signal load-complete for this CPU to the ucode. */
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2555
/*
 * Load the firmware sections for one CPU of a non-secured image.
 * *first_ucode_section is updated to the index where this CPU's
 * sections ended so a subsequent call (for CPU2) resumes from there.
 * On 8000-family devices the completed/started flags are raised in
 * IWM_CSR_UCODE_LOAD_STATUS_ADDR for the appropriate CPU half-word.
 *
 * Returns 0 on success or the errno from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	/* CPU1 status bits occupy the low half-word, CPU2 the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	/* Remember where this CPU's sections ended for the caller. */
	*first_ucode_section = last_read_idx;

	return 0;

}
2608
2609 static int
2610 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2611         const struct iwm_fw_sects *image)
2612 {
2613         int ret = 0;
2614         int first_ucode_section;
2615
2616         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2617                      image->is_dual_cpus ? "Dual" : "Single");
2618
2619         /* load to FW the binary non secured sections of CPU1 */
2620         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2621         if (ret)
2622                 return ret;
2623
2624         if (image->is_dual_cpus) {
2625                 /* set CPU2 header address */
2626                 iwm_write_prph(sc,
2627                                IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2628                                IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2629
2630                 /* load to FW the binary sections of CPU2 */
2631                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2632                                                  &first_ucode_section);
2633                 if (ret)
2634                         return ret;
2635         }
2636
2637         iwm_enable_interrupts(sc);
2638
2639         /* release CPU reset */
2640         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2641
2642         return 0;
2643 }
2644
2645 int
2646 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2647         const struct iwm_fw_sects *image)
2648 {
2649         int ret = 0;
2650         int first_ucode_section;
2651
2652         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2653                     image->is_dual_cpus ? "Dual" : "Single");
2654
2655         /* configure the ucode to be ready to get the secured image */
2656         /* release CPU reset */
2657         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2658
2659         /* load to FW the binary Secured sections of CPU1 */
2660         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2661             &first_ucode_section);
2662         if (ret)
2663                 return ret;
2664
2665         /* load to FW the binary sections of CPU2 */
2666         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2667             &first_ucode_section);
2668 }
2669
/* XXX Get rid of this definition */
/*
 * Mask all interrupt sources except FH_TX, which is needed to notice
 * firmware-chunk DMA completion while the ucode image is being loaded.
 * The mask is cached in sc_intmask (presumably consulted by the
 * interrupt handler - verify against the ISR).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2678
/* XXX Add proper rfkill support code */
/*
 * Bring the hardware up far enough to load a firmware image and start
 * it: take ownership of the device, clear pending interrupt and rfkill
 * handshake state, initialize the NIC, then push the image with the
 * family-appropriate loader.
 *
 * Returns 0 on success, EIO if the hardware never became ready, or an
 * errno from iwm_nic_init()/the ucode loaders.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any stale interrupt causes before masking. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2736
2737 static int
2738 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2739 {
2740         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2741                 .valid = htole32(valid_tx_ant),
2742         };
2743
2744         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2745             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2746 }
2747
2748 static int
2749 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2750 {
2751         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2752         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2753
2754         /* Set parameters */
2755         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2756         phy_cfg_cmd.calib_control.event_trigger =
2757             sc->sc_default_calib[ucode_type].event_trigger;
2758         phy_cfg_cmd.calib_control.flow_trigger =
2759             sc->sc_default_calib[ucode_type].flow_trigger;
2760
2761         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2762             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2763         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2764             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2765 }
2766
/*
 * Notification-wait callback for the ALIVE response sent by the
 * firmware after boot.  The response format is distinguished purely by
 * payload size (ver1 / ver2 / ver3 layouts); the error/log event table
 * pointers, SCD base address and - for ver2/ver3 - the UMAC error table
 * are extracted into the softc / alive_data.
 *
 * Always returns TRUE so the notification wait completes on the first
 * ALIVE packet; alive_data->valid carries the actual OK/not-OK status.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* ver1 responses carry no UMAC log information. */
		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address implies UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address implies UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2843
2844 static int
2845 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2846         struct iwm_rx_packet *pkt, void *data)
2847 {
2848         struct iwm_phy_db *phy_db = data;
2849
2850         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2851                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2852                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2853                             __func__, pkt->hdr.code);
2854                 }
2855                 return TRUE;
2856         }
2857
2858         if (iwm_phy_db_set_section(phy_db, pkt)) {
2859                 device_printf(sc->sc_dev,
2860                     "%s: iwm_phy_db_set_section failed\n", __func__);
2861         }
2862
2863         return FALSE;
2864 }
2865
/*
 * Read the requested firmware image, start it on the device and block
 * until the ALIVE notification arrives, then hand the SCD base address
 * to the transport layer and, when the image carries one, configure
 * the firmware paging mechanism.
 *
 * On any failure sc->cur_ucode is restored to the previously loaded
 * type.  Returns 0 on success or an errno (EIO if the ALIVE status was
 * not OK).  Called with the IWM lock held; the lock is dropped around
 * the notification wait.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	/* Register the ALIVE waiter before starting the firmware so the
	 * notification cannot be missed. */
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, NELEM(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Deregister the waiter we will never satisfy. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			/* Dump secure-boot status to help diagnose timeouts. */
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2954
2955 /*
2956  * mvm misc bits
2957  */
2958
/*
 * Boot the INIT ucode and run its calibration flow (or, with justnvm
 * set, just far enough to read the NVM and the MAC address).
 *
 * Note the control flow: the justnvm success path deliberately jumps
 * to 'error' - the calib_wait notification must be removed in that
 * case too, since the full calibration wait is skipped.
 *
 * Returns 0 on success, EPERM if rfkill is asserted, or an errno from
 * the ucode load / NVM read / command sends / calibration wait.
 * Called with the IWM lock held; dropped around the final wait.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Register the waiter up front so no notification is missed. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   NELEM(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Intentional: still needs to remove the calib waiter. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3051
3052 /*
3053  * receive side
3054  */
3055
3056 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	/*
	 * NOTE(review): 'size' is unused; the cluster is always allocated
	 * at IWM_RBUF_SIZE regardless of the caller's argument.
	 */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first, so that a
	 * load failure leaves the slot's existing mapping (and mbuf)
	 * untouched.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Drop the previous mapping for this slot, if any. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware takes the buffer address in 256-byte units. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3107
3108 #define IWM_RSSI_OFFSET 50
3109 static int
3110 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3111 {
3112         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3113         uint32_t agc_a, agc_b;
3114         uint32_t val;
3115
3116         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3117         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3118         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3119
3120         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3121         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3122         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3123
3124         /*
3125          * dBm = rssi dB - agc dB - constant.
3126          * Higher AGC (higher radio gain) means lower signal.
3127          */
3128         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3129         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3130         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3131
3132         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3133             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3134             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3135
3136         return max_rssi_dbm;
3137 }
3138
3139 /*
3140  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3141  * values are reported by the fw as positive values - need to negate
3142  * to obtain their dBM.  Account for missing antennas by replacing 0
3143  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3144  */
3145 static int
3146 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3147 {
3148         int energy_a, energy_b, energy_c, max_energy;
3149         uint32_t val;
3150
3151         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3152         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3153             IWM_RX_INFO_ENERGY_ANT_A_POS;
3154         energy_a = energy_a ? -energy_a : -256;
3155         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3156             IWM_RX_INFO_ENERGY_ANT_B_POS;
3157         energy_b = energy_b ? -energy_b : -256;
3158         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3159             IWM_RX_INFO_ENERGY_ANT_C_POS;
3160         energy_c = energy_c ? -energy_c : -256;
3161         max_energy = MAX(energy_a, energy_b);
3162         max_energy = MAX(max_energy, energy_c);
3163
3164         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3165             "energy In A %d B %d C %d , and max %d\n",
3166             energy_a, energy_b, energy_c, max_energy);
3167
3168         return max_energy;
3169 }
3170
3171 static void
3172 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3173         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3174 {
3175         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3176
3177         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3178         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3179
3180         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3181 }
3182
3183 /*
3184  * Retrieve the average noise (in dBm) among receivers.
3185  */
static int
iwm_get_noise(struct iwm_softc *sc,
	const struct iwm_mvm_statistics_rx_non_phy *stats)
{
	int i, total, nbant, noise;

	/*
	 * Average the per-antenna beacon-silence RSSI (low byte of each
	 * word), counting only antennas that report a non-zero value.
	 */
	total = nbant = noise = 0;
	for (i = 0; i < 3; i++) {
		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
		    __func__, i, noise);

		if (noise) {
			total += noise;
			nbant++;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
	    __func__, nbant, total);
#if 0
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
#else
	/* For now, just hard-code it to -96 to be safe */
	return (-96);
#endif
}
3214
3215 /*
3216  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3217  *
3218  * Handles the actual data of the Rx packet from the fw
3219  */
3220 static void
3221 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3222         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3223 {
3224         struct ieee80211com *ic = &sc->sc_ic;
3225         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3226         struct ieee80211_frame *wh;
3227         struct ieee80211_node *ni;
3228         struct ieee80211_rx_stats rxs;
3229         struct mbuf *m;
3230         struct iwm_rx_phy_info *phy_info;
3231         struct iwm_rx_mpdu_res_start *rx_res;
3232         uint32_t len;
3233         uint32_t rx_pkt_status;
3234         int rssi;
3235
3236         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3237
3238         phy_info = &sc->sc_last_phy_info;
3239         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3240         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3241         len = le16toh(rx_res->byte_count);
3242         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3243
3244         m = data->m;
3245         m->m_data = pkt->data + sizeof(*rx_res);
3246         m->m_pkthdr.len = m->m_len = len;
3247
3248         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3249                 device_printf(sc->sc_dev,
3250                     "dsp size out of range [0,20]: %d\n",
3251                     phy_info->cfg_phy_cnt);
3252                 return;
3253         }
3254
3255         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3256             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3257                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3258                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3259                 return; /* drop */
3260         }
3261
3262         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3263                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3264         } else {
3265                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3266         }
3267         /* Note: RSSI is absolute (ie a -ve value) */
3268         if (rssi < IWM_MIN_DBM)
3269                 rssi = IWM_MIN_DBM;
3270         else if (rssi > IWM_MAX_DBM)
3271                 rssi = IWM_MAX_DBM;
3272
3273         /* Map it to relative value */
3274         rssi = rssi - sc->sc_noise;
3275
3276         /* replenish ring for the buffer we're going to feed to the sharks */
3277         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3278                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3279                     __func__);
3280                 return;
3281         }
3282
3283         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3284             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3285
3286         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3287
3288         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3289             "%s: phy_info: channel=%d, flags=0x%08x\n",
3290             __func__,
3291             le16toh(phy_info->channel),
3292             le16toh(phy_info->phy_flags));
3293
3294         /*
3295          * Populate an RX state struct with the provided information.
3296          */
3297         bzero(&rxs, sizeof(rxs));
3298         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3299         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3300         rxs.c_ieee = le16toh(phy_info->channel);
3301         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3302                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3303         } else {
3304                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3305         }
3306         /* rssi is in 1/2db units */
3307         rxs.rssi = rssi * 2;
3308         rxs.nf = sc->sc_noise;
3309
3310         if (ieee80211_radiotap_active_vap(vap)) {
3311                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3312
3313                 tap->wr_flags = 0;
3314                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3315                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3316                 tap->wr_chan_freq = htole16(rxs.c_freq);
3317                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3318                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3319                 tap->wr_dbm_antsignal = (int8_t)rssi;
3320                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3321                 tap->wr_tsft = phy_info->system_timestamp;
3322                 switch (phy_info->rate) {
3323                 /* CCK rates. */
3324                 case  10: tap->wr_rate =   2; break;
3325                 case  20: tap->wr_rate =   4; break;
3326                 case  55: tap->wr_rate =  11; break;
3327                 case 110: tap->wr_rate =  22; break;
3328                 /* OFDM rates. */
3329                 case 0xd: tap->wr_rate =  12; break;
3330                 case 0xf: tap->wr_rate =  18; break;
3331                 case 0x5: tap->wr_rate =  24; break;
3332                 case 0x7: tap->wr_rate =  36; break;
3333                 case 0x9: tap->wr_rate =  48; break;
3334                 case 0xb: tap->wr_rate =  72; break;
3335                 case 0x1: tap->wr_rate =  96; break;
3336                 case 0x3: tap->wr_rate = 108; break;
3337                 /* Unknown rate: should not happen. */
3338                 default:  tap->wr_rate =   0;
3339                 }
3340         }
3341
3342         IWM_UNLOCK(sc);
3343         if (ni != NULL) {
3344                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3345                 ieee80211_input_mimo(ni, m, &rxs);
3346                 ieee80211_free_node(ni);
3347         } else {
3348                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3349                 ieee80211_input_mimo_all(ic, m, &rxs);
3350         }
3351         IWM_LOCK(sc);
3352 }
3353
3354 static int
3355 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3356         struct iwm_node *in)
3357 {
3358         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3359         struct ieee80211_node *ni = &in->in_ni;
3360         struct ieee80211vap *vap = ni->ni_vap;
3361         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3362         int failack = tx_resp->failure_frame;
3363
3364         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3365
3366         /* Update rate control statistics. */
3367         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3368             __func__,
3369             (int) le16toh(tx_resp->status.status),
3370             (int) le16toh(tx_resp->status.sequence),
3371             tx_resp->frame_count,
3372             tx_resp->bt_kill_count,
3373             tx_resp->failure_rts,
3374             tx_resp->failure_frame,
3375             le32toh(tx_resp->initial_rate),
3376             (int) le16toh(tx_resp->wireless_media_time));
3377
3378         if (status != IWM_TX_STATUS_SUCCESS &&
3379             status != IWM_TX_STATUS_DIRECT_DONE) {
3380                 ieee80211_ratectl_tx_complete(vap, ni,
3381                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3382                 return (1);
3383         } else {
3384                 ieee80211_ratectl_tx_complete(vap, ni,
3385                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3386                 return (0);
3387         }
3388 }
3389
/*
 * Handle a TX-done notification from firmware: report the result to rate
 * control, release the frame's DMA mapping and mbuf, and restart transmit
 * if the queue drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog timer can be reset. */
	sc->sc_tx_timer = 0;

	/* status is 0 on success, non-zero on failure. */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hands ownership of 'm' (and the node ref) to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unthrottle the queue once it drains below the low-water mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3432
3433 /*
3434  * transmit side
3435  */
3436
3437 /*
3438  * Process a "command done" firmware notification.  This is where we wakeup
3439  * processes waiting for a synchronous command completion.
3440  * from if_iwn
3441  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's ring slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Sanity check: with 'queued' outstanding commands, the completion
	 * at 'idx' should be exactly 'queued' slots behind 'cur'.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KKASSERT(ring->queued > 0);
	ring->queued--;
	/* Release the cmd-in-flight reference once the queue is empty. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3476
3477 #if 0
3478 /*
3479  * necessary only for block ack mode
3480  */
3481 void
3482 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3483         uint16_t len)
3484 {
3485         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3486         uint16_t w_val;
3487
3488         scd_bc_tbl = sc->sched_dma.vaddr;
3489
3490         len += 8; /* magic numbers came naturally from paris */
3491         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3492                 len = roundup(len, 4) / 4;
3493
3494         w_val = htole16(sta_id << 12 | len);
3495
3496         /* Update TX scheduler. */
3497         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3498         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3499             BUS_DMASYNC_PREWRITE);
3500
3501         /* I really wonder what this is ?!? */
3502         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3503                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3504                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3505                     BUS_DMASYNC_PREWRITE);
3506         }
3507 }
3508 #endif
3509
3510 /*
3511  * Take an 802.11 (non-n) rate, find the relevant rate
3512  * table entry.  return the index into in_ridx[].
3513  *
3514  * The caller then uses that index back into in_ridx
3515  * to figure out the rate index programmed /into/
3516  * the firmware for this given node.
3517  */
3518 static int
3519 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3520     uint8_t rate)
3521 {
3522         int i;
3523         uint8_t r;
3524
3525         for (i = 0; i < nitems(in->in_ridx); i++) {
3526                 r = iwm_rates[in->in_ridx[i]].rate;
3527                 if (rate == r)
3528                         return (i);
3529         }
3530         /* XXX Return the first */
3531         /* XXX TODO: have it return the /lowest/ */
3532         return (0);
3533 }
3534
3535 /*
3536  * Fill in the rate related information for a transmit command.
3537  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		/* Map the ratectl-chosen txrate back to the firmware table. */
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3608
3609 #define TB0_SIZE 16
/*
 * Queue a single frame on TX ring 'ac': build the firmware TX command,
 * DMA-map the payload, fill the TFD descriptor and kick the ring.
 * Consumes 'm' on failure; returns 0 on success or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and fill the rate fields of the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Request an ACK for all unicast frames. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	/*
	 * Map the payload; two TFD slots are reserved for the command, so
	 * at most IWM_MAX_SCATTER - 2 payload segments.  The DragonFly
	 * helper defragments internally; the FreeBSD path retries with
	 * m_collapse() on EFBIG.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command + header; payload follows. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3841
3842 static int
3843 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3844     const struct ieee80211_bpf_params *params)
3845 {
3846         struct ieee80211com *ic = ni->ni_ic;
3847         struct iwm_softc *sc = ic->ic_softc;
3848         int error = 0;
3849
3850         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3851             "->%s begin\n", __func__);
3852
3853         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3854                 m_freem(m);
3855                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3856                     "<-%s not RUNNING\n", __func__);
3857                 return (ENETDOWN);
3858         }
3859
3860         IWM_LOCK(sc);
3861         /* XXX fix this */
3862         if (params == NULL) {
3863                 error = iwm_tx(sc, m, ni, 0);
3864         } else {
3865                 error = iwm_tx(sc, m, ni, 0);
3866         }
3867         sc->sc_tx_timer = 5;
3868         IWM_UNLOCK(sc);
3869
3870         return (error);
3871 }
3872
3873 /*
3874  * mvm/tx.c
3875  */
3876
3877 /*
3878  * Note that there are transports that buffer frames before they reach
3879  * the firmware. This means that after flush_tx_path is called, the
3880  * queue might not be empty. The race-free way to handle this is to:
3881  * 1) set the station as draining
3882  * 2) flush the Tx path
3883  * 3) wait for the transport queues to be empty
3884  */
3885 static int
3886 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3887 {
3888         int ret;
3889         struct iwm_tx_path_flush_cmd flush_cmd = {
3890                 .queues_ctl = htole32(tfd_msk),
3891                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3892         };
3893
3894         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3895             sizeof(flush_cmd), &flush_cmd);
3896         if (ret)
3897                 device_printf(sc->sc_dev,
3898                     "Flushing tx queue failed: %d\n", ret);
3899         return ret;
3900 }
3901
3902 static int
3903 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3904         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3905 {
3906         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3907             cmd, status);
3908 }
3909
3910 /* send station add/update command to firmware */
3911 static int
3912 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3913 {
3914         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3915         int ret;
3916         uint32_t status;
3917
3918         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3919
3920         add_sta_cmd.sta_id = IWM_STATION_ID;
3921         add_sta_cmd.mac_id_n_color
3922             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3923                 IWM_DEFAULT_COLOR));
3924         if (!update) {
3925                 int ac;
3926                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3927                         add_sta_cmd.tfd_queue_msk |=
3928                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3929                 }
3930                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3931         }
3932         add_sta_cmd.add_modify = update ? 1 : 0;
3933         add_sta_cmd.station_flags_msk
3934             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3935         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3936         if (update)
3937                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3938
3939         status = IWM_ADD_STA_SUCCESS;
3940         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3941         if (ret)
3942                 return ret;
3943
3944         switch (status) {
3945         case IWM_ADD_STA_SUCCESS:
3946                 break;
3947         default:
3948                 ret = EIO;
3949                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3950                 break;
3951         }
3952
3953         return ret;
3954 }
3955
/* Add the BSS station to the firmware's station table. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3961
/* Update the already-added BSS station in the firmware's table. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3967
3968 static int
3969 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3970         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3971 {
3972         struct iwm_mvm_add_sta_cmd_v7 cmd;
3973         int ret;
3974         uint32_t status;
3975
3976         memset(&cmd, 0, sizeof(cmd));
3977         cmd.sta_id = sta->sta_id;
3978         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3979
3980         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3981         cmd.tid_disable_tx = htole16(0xffff);
3982
3983         if (addr)
3984                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3985
3986         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3987         if (ret)
3988                 return ret;
3989
3990         switch (status) {
3991         case IWM_ADD_STA_SUCCESS:
3992                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3993                     "%s: Internal station added.\n", __func__);
3994                 return 0;
3995         default:
3996                 device_printf(sc->sc_dev,
3997                     "%s: Add internal station failed, status=0x%x\n",
3998                     __func__, status);
3999                 ret = EIO;
4000                 break;
4001         }
4002         return ret;
4003 }
4004
4005 static int
4006 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4007 {
4008         int ret;
4009
4010         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4011         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4012
4013         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4014         if (ret)
4015                 return ret;
4016
4017         ret = iwm_mvm_add_int_sta_common(sc,
4018             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4019
4020         if (ret)
4021                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4022         return ret;
4023 }
4024
/*
 * Send an IWM_TIME_QUOTA_CMD splitting the firmware's scheduling-session
 * fragments (IWM_MVM_MAX_QUOTA) evenly across active bindings.  With a
 * single vap there is at most one active binding; passing in == NULL
 * zeroes all quotas.  Returns 0 or the command-submission errno.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = in->in_phyctxt->color;

		/* vestigial condition kept from the upstream driver */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		/* Default every slot to "invalid" before packing below. */
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Pack active bindings densely at the front of cmd.quotas[]. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}
4091
4092 /*
4093  * ieee80211 routines
4094  */
4095
4096 /*
4097  * Change to AUTH state in 80211 state machine.  Roughly matches what
4098  * Linux does in bss_info_changed().
4099  */
4100 static int
4101 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4102 {
4103         struct ieee80211_node *ni;
4104         struct iwm_node *in;
4105         struct iwm_vap *iv = IWM_VAP(vap);
4106         uint32_t duration;
4107         int error;
4108
4109         /*
4110          * XXX i have a feeling that the vap node is being
4111          * freed from underneath us. Grr.
4112          */
4113         ni = ieee80211_ref_node(vap->iv_bss);
4114         in = IWM_NODE(ni);
4115         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4116             "%s: called; vap=%p, bss ni=%p\n",
4117             __func__,
4118             vap,
4119             ni);
4120
4121         in->in_assoc = 0;
4122
4123         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4124         if (error != 0)
4125                 return error;
4126
4127         error = iwm_allow_mcast(vap, sc);
4128         if (error) {
4129                 device_printf(sc->sc_dev,
4130                     "%s: failed to set multicast\n", __func__);
4131                 goto out;
4132         }
4133
4134         /*
4135          * This is where it deviates from what Linux does.
4136          *
4137          * Linux iwlwifi doesn't reset the nic each time, nor does it
4138          * call ctxt_add() here.  Instead, it adds it during vap creation,
4139          * and always does a mac_ctx_changed().
4140          *
4141          * The openbsd port doesn't attempt to do that - it reset things
4142          * at odd states and does the add here.
4143          *
4144          * So, until the state handling is fixed (ie, we never reset
4145          * the NIC except for a firmware failure, which should drag
4146          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4147          * contexts that are required), let's do a dirty hack here.
4148          */
4149         if (iv->is_uploaded) {
4150                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4151                         device_printf(sc->sc_dev,
4152                             "%s: failed to update MAC\n", __func__);
4153                         goto out;
4154                 }
4155                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4156                     in->in_ni.ni_chan, 1, 1)) != 0) {
4157                         device_printf(sc->sc_dev,
4158                             "%s: failed update phy ctxt\n", __func__);
4159                         goto out;
4160                 }
4161                 in->in_phyctxt = &sc->sc_phyctxt[0];
4162
4163                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4164                         device_printf(sc->sc_dev,
4165                             "%s: binding update cmd\n", __func__);
4166                         goto out;
4167                 }
4168                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4169                         device_printf(sc->sc_dev,
4170                             "%s: failed to update sta\n", __func__);
4171                         goto out;
4172                 }
4173         } else {
4174                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4175                         device_printf(sc->sc_dev,
4176                             "%s: failed to add MAC\n", __func__);
4177                         goto out;
4178                 }
4179                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4180                     in->in_ni.ni_chan, 1, 1)) != 0) {
4181                         device_printf(sc->sc_dev,
4182                             "%s: failed add phy ctxt!\n", __func__);
4183                         error = ETIMEDOUT;
4184                         goto out;
4185                 }
4186                 in->in_phyctxt = &sc->sc_phyctxt[0];
4187
4188                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4189                         device_printf(sc->sc_dev,
4190                             "%s: binding add cmd\n", __func__);
4191                         goto out;
4192                 }
4193                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4194                         device_printf(sc->sc_dev,
4195                             "%s: failed to add sta\n", __func__);
4196                         goto out;
4197                 }
4198         }
4199
4200         /*
4201          * Prevent the FW from wandering off channel during association
4202          * by "protecting" the session with a time event.
4203          */
4204         /* XXX duration is in units of TU, not MS */
4205         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4206         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4207         DELAY(100);
4208
4209         error = 0;
4210 out:
4211         ieee80211_free_node(ni);
4212         return (error);
4213 }
4214
4215 static int
4216 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4217 {
4218         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4219         int error;
4220
4221         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4222                 device_printf(sc->sc_dev,
4223                     "%s: failed to update STA\n", __func__);
4224                 return error;
4225         }
4226
4227         in->in_assoc = 1;
4228         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4229                 device_printf(sc->sc_dev,
4230                     "%s: failed to update MAC\n", __func__);
4231                 return error;
4232         }
4233
4234         return 0;
4235 }
4236
/*
 * Tear down the firmware association state on the way out of RUN by
 * draining and flushing TX, then doing a full device stop/re-init.
 * See the comment below for why the "proper" teardown sequence is not
 * used.  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Reference implementation of the "proper" teardown, kept disabled. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4307
4308 static struct ieee80211_node *
4309 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4310 {
4311         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4312             M_INTWAIT | M_ZERO);
4313 }
4314
/*
 * Build the firmware link-quality (LQ) command for the given node:
 * map each negotiated 802.11 rate to a hardware rate index, then fill
 * in->in_lq.rs_table from the highest rate down, padding the remainder
 * of the table with the lowest rate.  The command itself is sent later
 * by the caller (see the RUN case in iwm_newstate()).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* rs_rates is lowest-first, so index from the end. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Unknown rate: leave in_ridx[i] as -1 from memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Round-robin over the valid TX antennas. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4430
4431 static int
4432 iwm_media_change(struct ifnet *ifp)
4433 {
4434         struct ieee80211vap *vap = ifp->if_softc;
4435         struct ieee80211com *ic = vap->iv_ic;
4436         struct iwm_softc *sc = ic->ic_softc;
4437         int error;
4438
4439         error = ieee80211_media_change(ifp);
4440         if (error != ENETRESET)
4441                 return error;
4442
4443         IWM_LOCK(sc);
4444         if (ic->ic_nrunning > 0) {
4445                 iwm_stop(sc);
4446                 iwm_init(sc);
4447         }
4448         IWM_UNLOCK(sc);
4449         return error;
4450 }
4451
4452
/*
 * net80211 state-machine hook.  Drives the firmware through the
 * INIT/SCAN/AUTH/ASSOC/RUN transitions and finally chains to the
 * stack's original iv_newstate.  Note the careful lock ordering:
 * the IEEE80211 lock is dropped before taking the IWM lock, and
 * re-acquired around any recursive iv_newstate call.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Run the stack's transition first, then reset the
			 * device; note the lock dance required for each.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quota and rates for the new BSS. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the LQ table built by iwm_setrates() to the FW. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4590
4591 void
4592 iwm_endscan_cb(void *arg, int pending)
4593 {
4594         struct iwm_softc *sc = arg;
4595         struct ieee80211com *ic = &sc->sc_ic;
4596
4597         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4598             "%s: scan ended\n",
4599             __func__);
4600
4601         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4602 }
4603
4604 /*
4605  * Aging and idle timeouts for the different possible scenarios
4606  * in default configuration
4607  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Rows are indexed by scenario, columns are {aging, idle} timers. */
	{
		/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		/* TX re- */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4631
4632 /*
4633  * Aging and idle timeouts for the different possible scenarios
4634  * in single BSS MAC configuration.
4635  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Rows are indexed by scenario, columns are {aging, idle} timers. */
	{
		/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		/* TX re- */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4659
/*
 * Populate a smart-fifo configuration command.  The watermark for the
 * full-on state depends on the peer's antenna capabilities (when ni is
 * given) or falls back to a MIMO2 default; long-delay timeouts are
 * uniform, while full-on timeouts come from one of the two tables above.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario/type. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: associated vs. default table. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4706
4707 static int
4708 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4709 {
4710         struct ieee80211com *ic = &sc->sc_ic;
4711         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4712         struct iwm_sf_cfg_cmd sf_cmd = {
4713                 .state = htole32(IWM_SF_FULL_ON),
4714         };
4715         int ret = 0;
4716
4717         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4718                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4719
4720         switch (new_state) {
4721         case IWM_SF_UNINIT:
4722         case IWM_SF_INIT_OFF:
4723                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4724                 break;
4725         case IWM_SF_FULL_ON:
4726                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4727                 break;
4728         default:
4729                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4730                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4731                           new_state);
4732                 return EINVAL;
4733         }
4734
4735         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4736                                    sizeof(sf_cmd), &sf_cmd);
4737         return ret;
4738 }
4739
4740 static int
4741 iwm_send_bt_init_conf(struct iwm_softc *sc)
4742 {
4743         struct iwm_bt_coex_cmd bt_cmd;
4744
4745         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4746         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4747
4748         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4749             &bt_cmd);
4750 }
4751
4752 static int
4753 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4754 {
4755         struct iwm_mcc_update_cmd mcc_cmd;
4756         struct iwm_host_cmd hcmd = {
4757                 .id = IWM_MCC_UPDATE_CMD,
4758                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4759                 .data = { &mcc_cmd },
4760         };
4761         int ret;
4762 #ifdef IWM_DEBUG
4763         struct iwm_rx_packet *pkt;
4764         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4765         struct iwm_mcc_update_resp *mcc_resp;
4766         int n_channels;
4767         uint16_t mcc;
4768 #endif
4769         int resp_v2 = isset(sc->sc_enabled_capa,
4770             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4771
4772         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4773         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4774         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4775             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4776                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4777         else
4778                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4779
4780         if (resp_v2)
4781                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4782         else
4783                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4784
4785         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4786             "send MCC update to FW with '%c%c' src = %d\n",
4787             alpha2[0], alpha2[1], mcc_cmd.source_id);
4788
4789         ret = iwm_send_cmd(sc, &hcmd);
4790         if (ret)
4791                 return ret;
4792
4793 #ifdef IWM_DEBUG
4794         pkt = hcmd.resp_pkt;
4795
4796         /* Extract MCC response */
4797         if (resp_v2) {
4798                 mcc_resp = (void *)pkt->data;
4799                 mcc = mcc_resp->mcc;
4800                 n_channels =  le32toh(mcc_resp->n_channels);
4801         } else {
4802                 mcc_resp_v1 = (void *)pkt->data;
4803                 mcc = mcc_resp_v1->mcc;
4804                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4805         }
4806
4807         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4808         if (mcc == 0)
4809                 mcc = 0x3030;  /* "00" - world */
4810
4811         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4812             "regulatory domain '%c%c' (%d channels available)\n",
4813             mcc >> 8, mcc & 0xff, n_channels);
4814 #endif
4815         iwm_free_resp(sc, &hcmd);
4816
4817         return 0;
4818 }
4819
4820 static void
4821 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4822 {
4823         struct iwm_host_cmd cmd = {
4824                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4825                 .len = { sizeof(uint32_t), },
4826                 .data = { &backoff, },
4827         };
4828
4829         if (iwm_send_cmd(sc, &cmd) != 0) {
4830                 device_printf(sc->sc_dev,
4831                     "failed to change thermal tx backoff\n");
4832         }
4833 }
4834
/*
 * Full hardware bring-up: start the NIC, run the INIT firmware image for
 * calibration, restart, then load the regular runtime firmware and push
 * the initial configuration (BT coex, antennas, PHY, stations, queues).
 * On any failure after the restart the device is stopped again and the
 * error is returned.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT image (calibration); results go to the phy db. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	/* Bluetooth coexistence configuration. */
	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	/* Tell the firmware which TX antennas may be used. */
	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" presumably selects the default/world domain — TODO confirm. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4942
4943 /* Allow multicast from our BSSID. */
4944 static int
4945 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4946 {
4947         struct ieee80211_node *ni = vap->iv_bss;
4948         struct iwm_mcast_filter_cmd *cmd;
4949         size_t size;
4950         int error;
4951
4952         size = roundup(sizeof(*cmd), 4);
4953         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4954         if (cmd == NULL)
4955                 return ENOMEM;
4956         cmd->filter_own = 1;
4957         cmd->port_id = 0;
4958         cmd->count = 0;
4959         cmd->pass_all = 1;
4960         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4961
4962         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4963             IWM_CMD_SYNC, size, cmd);
4964         kfree(cmd, M_DEVBUF);
4965
4966         return (error);
4967 }
4968
4969 /*
4970  * ifnet interfaces
4971  */
4972
4973 static void
4974 iwm_init(struct iwm_softc *sc)
4975 {
4976         int error;
4977
4978         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4979                 return;
4980         }
4981         sc->sc_generation++;
4982         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4983
4984         if ((error = iwm_init_hw(sc)) != 0) {
4985                 kprintf("iwm_init_hw failed %d\n", error);
4986                 iwm_stop(sc);
4987                 return;
4988         }
4989
4990         /*
4991          * Ok, firmware loaded and we are jogging
4992          */
4993         sc->sc_flags |= IWM_FLAG_HW_INITED;
4994         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4995 }
4996
4997 static int
4998 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4999 {
5000         struct iwm_softc *sc;
5001         int error;
5002
5003         sc = ic->ic_softc;
5004
5005         IWM_LOCK(sc);
5006         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5007                 IWM_UNLOCK(sc);
5008                 return (ENXIO);
5009         }
5010         error = mbufq_enqueue(&sc->sc_snd, m);
5011         if (error) {
5012                 IWM_UNLOCK(sc);
5013                 return (error);
5014         }
5015         iwm_start(sc);
5016         IWM_UNLOCK(sc);
5017         return (0);
5018 }
5019
5020 /*
5021  * Dequeue packets from sendq and call send.
5022  */
5023 static void
5024 iwm_start(struct iwm_softc *sc)
5025 {
5026         struct ieee80211_node *ni;
5027         struct mbuf *m;
5028         int ac = 0;
5029
5030         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5031         while (sc->qfullmsk == 0 &&
5032                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5033                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5034                 if (iwm_tx(sc, m, ni, ac) != 0) {
5035                         if_inc_counter(ni->ni_vap->iv_ifp,
5036                             IFCOUNTER_OERRORS, 1);
5037                         ieee80211_free_node(ni);
5038                         continue;
5039                 }
5040                 sc->sc_tx_timer = 15;
5041         }
5042         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5043 }
5044
/*
 * Take the interface down: clear the running state, stop LED blinking
 * and the TX watchdog, and power off the device.  Callers hold the
 * driver lock (see iwm_parent()).
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5057
5058 static void
5059 iwm_watchdog(void *arg)
5060 {
5061         struct iwm_softc *sc = arg;
5062
5063         if (sc->sc_tx_timer > 0) {
5064                 if (--sc->sc_tx_timer == 0) {
5065                         device_printf(sc->sc_dev, "device timeout\n");
5066 #ifdef IWM_DEBUG
5067                         iwm_nic_error(sc);
5068 #endif
5069                         iwm_stop(sc);
5070 #if defined(__DragonFly__)
5071                         ++sc->sc_ic.ic_oerrors;
5072 #else
5073                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5074 #endif
5075                         return;
5076                 }
5077         }
5078         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5079 }
5080
5081 static void
5082 iwm_parent(struct ieee80211com *ic)
5083 {
5084         struct iwm_softc *sc = ic->ic_softc;
5085         int startall = 0;
5086
5087         IWM_LOCK(sc);
5088         if (ic->ic_nrunning > 0) {
5089                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5090                         iwm_init(sc);
5091                         startall = 1;
5092                 }
5093         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5094                 iwm_stop(sc);
5095         IWM_UNLOCK(sc);
5096         if (startall)
5097                 ieee80211_start_all(ic);
5098 }
5099
5100 /*
5101  * The interrupt side of things
5102  */
5103
5104 /*
5105  * error dumping routines are from iwlwifi/mvm/utils.c
5106  */
5107
/*
 * Device error log layout (LOG_ERROR_TABLE_API_S_VER_3).
 *
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * Do not reorder or resize members: the layout must match what the
 * firmware writes into device memory.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5161
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * Do not reorder or resize members: the layout must match what the
 * firmware writes into device memory.
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version, major */
	uint32_t umac_minor;	/* UMAC firmware version, minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5186
/*
 * Byte offsets/sizes used only in the sanity check on the 'valid'
 * count before dumping the firmware error logs below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5189
5190 #ifdef IWM_DEBUG
/*
 * Mapping of firmware error IDs to human-readable names for debug
 * output; consumed only by iwm_desc_lookup() below.  The catch-all
 * ADVANCED_SYSASSERT entry must remain last.
 *
 * Fix: declared static const — the table is read-only and was
 * previously exported into the kernel's global namespace.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5212
5213 static const char *
5214 iwm_desc_lookup(uint32_t num)
5215 {
5216         int i;
5217
5218         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5219                 if (advanced_lookup[i].num == num)
5220                         return advanced_lookup[i].name;
5221
5222         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5223         return advanced_lookup[i].name;
5224 }
5225
/*
 * Dump the UMAC error event table (family 8000+ firmware) to the
 * console.  The table is read from device memory at the address the
 * firmware advertised in sc->umac_error_event_table.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Pointers below 0x800000 are treated as unset/invalid. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes the length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5272
5273 /*
5274  * Support for dumping the error log seemed like a good idea ...
5275  * but it's mostly hex junk and the only sensible thing is the
5276  * hw/ucode revision (which we know anyway).  Since it's here,
5277  * I'll just leave it in, just in case e.g. the Intel guys want to
5278  * help us decipher some "ADVANCED_SYSASSERT" later.
5279  */
/*
 * Dump the LMAC error event table to the console, then the UMAC table
 * if the firmware advertised one.  Debug builds only (IWM_DEBUG).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Pointers below 0x800000 are treated as unset/invalid. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes the length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5352 #endif
5353
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * NOTE(review): the trailing ';' in the definition produces an empty
 * statement at each use site; harmless but unconventional.
 */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5355
5356 /*
5357  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5358  * Basic structure from if_iwn
5359  */
5360 static void
5361 iwm_notif_intr(struct iwm_softc *sc)
5362 {
5363         struct ieee80211com *ic = &sc->sc_ic;
5364         uint16_t hw;
5365
5366         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5367             BUS_DMASYNC_POSTREAD);
5368
5369         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5370
5371         /*
5372          * Process responses
5373          */
5374         while (sc->rxq.cur != hw) {
5375                 struct iwm_rx_ring *ring = &sc->rxq;
5376                 struct iwm_rx_data *data = &ring->data[ring->cur];
5377                 struct iwm_rx_packet *pkt;
5378                 struct iwm_cmd_response *cresp;
5379                 int qid, idx, code;
5380
5381                 bus_dmamap_sync(ring->data_dmat, data->map,
5382                     BUS_DMASYNC_POSTREAD);
5383                 pkt = mtod(data->m, struct iwm_rx_packet *);
5384
5385                 qid = pkt->hdr.qid & ~0x80;
5386                 idx = pkt->hdr.idx;
5387
5388                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5389                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5390                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5391                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5392
5393                 /*
5394                  * randomly get these from the firmware, no idea why.
5395                  * they at least seem harmless, so just ignore them for now
5396                  */
5397                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5398                     || pkt->len_n_flags == htole32(0x55550000))) {
5399                         ADVANCE_RXQ(sc);
5400                         continue;
5401                 }
5402
5403                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5404
5405                 switch (code) {
5406                 case IWM_REPLY_RX_PHY_CMD:
5407                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5408                         break;
5409
5410                 case IWM_REPLY_RX_MPDU_CMD:
5411                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5412                         break;
5413
5414                 case IWM_TX_CMD:
5415                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5416                         break;
5417
5418                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5419                         struct iwm_missed_beacons_notif *resp;
5420                         int missed;
5421
5422                         /* XXX look at mac_id to determine interface ID */
5423                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5424
5425                         resp = (void *)pkt->data;
5426                         missed = le32toh(resp->consec_missed_beacons);
5427
5428                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5429                             "%s: MISSED_BEACON: mac_id=%d, "
5430                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5431                             "num_rx=%d\n",
5432                             __func__,
5433                             le32toh(resp->mac_id),
5434                             le32toh(resp->consec_missed_beacons_since_last_rx),
5435                             le32toh(resp->consec_missed_beacons),
5436                             le32toh(resp->num_expected_beacons),
5437                             le32toh(resp->num_recvd_beacons));
5438
5439                         /* Be paranoid */
5440                         if (vap == NULL)
5441                                 break;
5442
5443                         /* XXX no net80211 locking? */
5444                         if (vap->iv_state == IEEE80211_S_RUN &&
5445                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5446                                 if (missed > vap->iv_bmissthreshold) {
5447                                         /* XXX bad locking; turn into task */
5448                                         IWM_UNLOCK(sc);
5449                                         ieee80211_beacon_miss(ic);
5450                                         IWM_LOCK(sc);
5451                                 }
5452                         }
5453
5454                         break; }
5455
5456                 case IWM_MFUART_LOAD_NOTIFICATION:
5457                         break;
5458
5459                 case IWM_MVM_ALIVE:
5460                         break;
5461
5462                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5463                         break;
5464
5465                 case IWM_STATISTICS_NOTIFICATION: {
5466                         struct iwm_notif_statistics *stats;
5467                         stats = (void *)pkt->data;
5468                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5469                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5470                         break; }
5471
5472                 case IWM_NVM_ACCESS_CMD:
5473                 case IWM_MCC_UPDATE_CMD:
5474                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5475                                 memcpy(sc->sc_cmd_resp,
5476                                     pkt, sizeof(sc->sc_cmd_resp));
5477                         }
5478                         break;
5479
5480                 case IWM_MCC_CHUB_UPDATE_CMD: {
5481                         struct iwm_mcc_chub_notif *notif;
5482                         notif = (void *)pkt->data;
5483
5484                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5485                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5486                         sc->sc_fw_mcc[2] = '\0';
5487                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5488                             "fw source %d sent CC '%s'\n",
5489                             notif->source_id, sc->sc_fw_mcc);
5490                         break; }
5491
5492                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5493                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5494                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5495                         struct iwm_dts_measurement_notif_v1 *notif;
5496
5497                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5498                                 device_printf(sc->sc_dev,
5499                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5500                                 break;
5501                         }
5502                         notif = (void *)pkt->data;
5503                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5504                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5505                             notif->temp);
5506                         break;
5507                 }
5508
5509                 case IWM_PHY_CONFIGURATION_CMD:
5510                 case IWM_TX_ANT_CONFIGURATION_CMD:
5511                 case IWM_ADD_STA:
5512                 case IWM_MAC_CONTEXT_CMD:
5513                 case IWM_REPLY_SF_CFG_CMD:
5514                 case IWM_POWER_TABLE_CMD:
5515                 case IWM_PHY_CONTEXT_CMD:
5516                 case IWM_BINDING_CONTEXT_CMD:
5517                 case IWM_TIME_EVENT_CMD:
5518                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5519                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5520                 case IWM_SCAN_ABORT_UMAC:
5521                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5522                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5523                 case IWM_REPLY_BEACON_FILTERING_CMD:
5524                 case IWM_MAC_PM_POWER_TABLE:
5525                 case IWM_TIME_QUOTA_CMD:
5526                 case IWM_REMOVE_STA:
5527                 case IWM_TXPATH_FLUSH:
5528                 case IWM_LQ_CMD:
5529                 case IWM_FW_PAGING_BLOCK_CMD:
5530                 case IWM_BT_CONFIG:
5531                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5532                         cresp = (void *)pkt->data;
5533                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5534                                 memcpy(sc->sc_cmd_resp,
5535                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5536                         }
5537                         break;
5538
5539                 /* ignore */
5540                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5541                         break;
5542
5543                 case IWM_INIT_COMPLETE_NOTIF:
5544                         break;
5545
5546                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5547                         struct iwm_periodic_scan_complete *notif;
5548                         notif = (void *)pkt->data;
5549
5550                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5551                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5552                                 ieee80211_runtask(ic, &sc->sc_es_task);
5553                         }
5554                         break;
5555                 }
5556
5557                 case IWM_SCAN_ITERATION_COMPLETE: {
5558                         struct iwm_lmac_scan_complete_notif *notif;
5559                         notif = (void *)pkt->data;
5560                         break;
5561                 }
5562
5563                 case IWM_SCAN_COMPLETE_UMAC: {
5564                         struct iwm_umac_scan_complete *notif;
5565                         notif = (void *)pkt->data;
5566
5567                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5568                             "UMAC scan complete, status=0x%x\n",
5569                             notif->status);
5570
5571                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5572                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5573                                 ieee80211_runtask(ic, &sc->sc_es_task);
5574                         }
5575                         break;
5576                 }
5577
5578                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5579                         struct iwm_umac_scan_iter_complete_notif *notif;
5580                         notif = (void *)pkt->data;
5581
5582                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5583                             "complete, status=0x%x, %d channels scanned\n",
5584                             notif->status, notif->scanned_channels);
5585                         break;
5586                 }
5587
5588                 case IWM_REPLY_ERROR: {
5589                         struct iwm_error_resp *resp;
5590                         resp = (void *)pkt->data;
5591
5592                         device_printf(sc->sc_dev,
5593                             "firmware error 0x%x, cmd 0x%x\n",
5594                             le32toh(resp->error_type),
5595                             resp->cmd_id);
5596                         break; }
5597
5598                 case IWM_TIME_EVENT_NOTIFICATION: {
5599                         struct iwm_time_event_notif *notif;
5600                         notif = (void *)pkt->data;
5601
5602                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5603                             "TE notif status = 0x%x action = 0x%x\n",
5604                             notif->status, notif->action);
5605                         break; }
5606
5607                 case IWM_MCAST_FILTER_CMD:
5608                         break;
5609
5610                 case IWM_SCD_QUEUE_CFG: {
5611                         struct iwm_scd_txq_cfg_rsp *rsp;
5612                         rsp = (void *)pkt->data;
5613
5614                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5615                             "queue cfg token=0x%x sta_id=%d "
5616                             "tid=%d scd_queue=%d\n",
5617                             rsp->token, rsp->sta_id, rsp->tid,
5618                             rsp->scd_queue);
5619                         break;
5620                 }
5621
5622                 default:
5623                         device_printf(sc->sc_dev,
5624                             "frame %d/%d %x UNHANDLED (this should "
5625                             "not happen)\n", qid, idx,
5626                             pkt->len_n_flags);
5627                         break;
5628                 }
5629
5630                 /*
5631                  * Why test bit 0x80?  The Linux driver:
5632                  *
5633                  * There is one exception:  uCode sets bit 15 when it
5634                  * originates the response/notification, i.e. when the
5635                  * response/notification is not a direct response to a
5636                  * command sent by the driver.  For example, uCode issues
5637                  * IWM_REPLY_RX when it sends a received frame to the driver;
5638                  * it is not a direct response to any driver command.
5639                  *
5640                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5641                  * uses a slightly different format for pkt->hdr, and "qid"
5642                  * is actually the upper byte of a two-byte field.
5643                  */
5644                 if (!(pkt->hdr.qid & (1 << 7))) {
5645                         iwm_cmd_done(sc, pkt);
5646                 }
5647
5648                 ADVANCE_RXQ(sc);
5649         }
5650
5651         /*
5652          * Tell the firmware what we have processed.
5653          * Seems like the hardware gets upset unless we align
5654          * the write by 8??
5655          */
5656         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5657         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5658 }
5659
/*
 * Primary interrupt handler.
 *
 * The interrupt cause is gathered either from the ICT table (a DMA area
 * the hardware writes cause words into, used when IWM_FLAG_USE_ICT is
 * set) or directly from the IWM_CSR_INT / IWM_CSR_FH_INT_STATUS
 * registers.  Causes are then serviced in order: firmware SW error
 * (dump state and restart the VAP), hardware error (stop the device),
 * firmware chunk load completion, rfkill, the periodic RX interrupt,
 * and finally RX notifications via iwm_notif_intr().  Interrupts are
 * re-enabled on the way out via iwm_restore_interrupts().
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;	/* NOTE(review): rv is set but never consumed */
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* iwm_pci_detach() NULLs sc_mem; bail if the device is gone. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): conversion is htole32() where le32toh()
		 * would be the matching direction for a device-written
		 * table; both are the identity on little-endian hosts --
		 * confirm against upstream before changing.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* nothing pending; spurious */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the slot and advance around the ring. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Spread the packed ICT value into CSR_INT bit positions. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* nothing to do; re-enable and leave */
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the thread sleeping on the firmware upload. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain everything the firmware put on the RX ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	/* Unmask interrupts again before returning. */
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5820
5821 /*
5822  * Autoconf glue-sniffing
5823  */
5824 #define PCI_VENDOR_INTEL                0x8086
5825 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5826 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5827 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5828 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5829 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5830 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5831 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5832 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5833 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5834 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5835
/*
 * Table mapping supported PCI device IDs (all with vendor
 * PCI_VENDOR_INTEL) to their chipset-specific configuration.
 * Consulted by iwm_probe() and iwm_dev_check().
 */
static const struct iwm_devices {
	uint16_t		device;		/* PCI device ID */
	const struct iwm_cfg	*cfg;		/* per-chipset configuration */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5851
5852 static int
5853 iwm_probe(device_t dev)
5854 {
5855         int i;
5856
5857         for (i = 0; i < nitems(iwm_devices); i++) {
5858                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5859                     pci_get_device(dev) == iwm_devices[i].device) {
5860                         device_set_desc(dev, iwm_devices[i].cfg->name);
5861                         return (BUS_PROBE_DEFAULT);
5862                 }
5863         }
5864
5865         return (ENXIO);
5866 }
5867
5868 static int
5869 iwm_dev_check(device_t dev)
5870 {
5871         struct iwm_softc *sc;
5872         uint16_t devid;
5873         int i;
5874
5875         sc = device_get_softc(dev);
5876
5877         devid = pci_get_device(dev);
5878         for (i = 0; i < NELEM(iwm_devices); i++) {
5879                 if (iwm_devices[i].device == devid) {
5880                         sc->cfg = iwm_devices[i].cfg;
5881                         return (0);
5882                 }
5883         }
5884         device_printf(dev, "unknown adapter type\n");
5885         return ENXIO;
5886 }
5887
5888 /* PCI registers */
5889 #define PCI_CFG_RETRY_TIMEOUT   0x041
5890
5891 static int
5892 iwm_pci_attach(device_t dev)
5893 {
5894         struct iwm_softc *sc;
5895         int count, error, rid;
5896         uint16_t reg;
5897 #if defined(__DragonFly__)
5898         int irq_flags;
5899 #endif
5900
5901         sc = device_get_softc(dev);
5902
5903         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5904          * PCI Tx retries from interfering with C3 CPU state */
5905         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5906
5907         /* Enable bus-mastering and hardware bug workaround. */
5908         pci_enable_busmaster(dev);
5909         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5910         /* if !MSI */
5911         if (reg & PCIM_STATUS_INTxSTATE) {
5912                 reg &= ~PCIM_STATUS_INTxSTATE;
5913         }
5914         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5915
5916         rid = PCIR_BAR(0);
5917         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5918             RF_ACTIVE);
5919         if (sc->sc_mem == NULL) {
5920                 device_printf(sc->sc_dev, "can't map mem space\n");
5921                 return (ENXIO);
5922         }
5923         sc->sc_st = rman_get_bustag(sc->sc_mem);
5924         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5925
5926         /* Install interrupt handler. */
5927         count = 1;
5928         rid = 0;
5929 #if defined(__DragonFly__)
5930         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5931         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5932 #else
5933         if (pci_alloc_msi(dev, &count) == 0)
5934                 rid = 1;
5935         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5936             (rid != 0 ? 0 : RF_SHAREABLE));
5937 #endif
5938         if (sc->sc_irq == NULL) {
5939                 device_printf(dev, "can't map interrupt\n");
5940                         return (ENXIO);
5941         }
5942 #if defined(__DragonFly__)
5943         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5944                                iwm_intr, sc, &sc->sc_ih,
5945                                &wlan_global_serializer);
5946 #else
5947         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5948             NULL, iwm_intr, sc, &sc->sc_ih);
5949 #endif
5950         if (sc->sc_ih == NULL) {
5951                 device_printf(dev, "can't establish interrupt");
5952 #if defined(__DragonFly__)
5953                 pci_release_msi(dev);
5954 #endif
5955                         return (ENXIO);
5956         }
5957         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5958
5959         return (0);
5960 }
5961
/*
 * Release the PCI resources acquired by iwm_pci_attach(): tear down the
 * interrupt handler, release the IRQ (and MSI), and unmap the BAR 0
 * register space.  Safe to call with partially-initialized state, as
 * each resource is checked for NULL before being released.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Mark released so this function stays idempotent. */
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		/* iwm_intr() tests sc_mem == NULL to detect detach. */
		sc->sc_mem = NULL;
#endif
	}
}
5984
5985
5986
/*
 * Device attach: initialize locks, callouts and tasks, set up PCI
 * resources, identify the exact chipset, wake and revision-check the
 * hardware, allocate all firmware/DMA rings, and fill in basic
 * net80211 state.  Firmware load and ieee80211_ifattach() are deferred
 * to iwm_preinit() through the sc_preinit_hook config_intrhook.
 *
 * Returns 0 on success; on any failure, tears down via
 * iwm_detach_local() and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	/* Synchronous command/notification rendezvous state. */
	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait (up to 25ms) for the MAC clock to stabilize. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			/* Extract the silicon step from the AUX MISC reg. */
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Start each PHY context unused and unbound to a channel. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor (dBm, until statistics give a real one). */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load/net80211 attach to iwm_preinit(). */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6188
6189 static int
6190 iwm_is_valid_ether_addr(uint8_t *addr)
6191 {
6192         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6193
6194         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6195                 return (FALSE);
6196
6197         return (TRUE);
6198 }
6199
6200 static int
6201 iwm_update_edca(struct ieee80211com *ic)
6202 {
6203         struct iwm_softc *sc = ic->ic_softc;
6204
6205         device_printf(sc->sc_dev, "%s: called\n", __func__);
6206         return (0);
6207 }
6208
/*
 * Deferred attach continuation, run from the config_intrhook established
 * in iwm_attach() once interrupts are available.  Brings the hardware up
 * far enough to read the NVM contents (MAC address, band capabilities),
 * then completes net80211 attachment.  On failure the partially attached
 * device is torn down with iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init ucode once to populate the NVM data used below,
	 * then power the device back down regardless of the outcome.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Attach finished; release the boot-time intrhook. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	/* Pass 0: net80211 was not attached, so only tear down the rest. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6279
6280 /*
6281  * Attach the interface to 802.11 radiotap.
6282  */
6283 static void
6284 iwm_radiotap_attach(struct iwm_softc *sc)
6285 {
6286         struct ieee80211com *ic = &sc->sc_ic;
6287
6288         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6289             "->%s begin\n", __func__);
6290         ieee80211_radiotap_attach(ic,
6291             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6292                 IWM_TX_RADIOTAP_PRESENT,
6293             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6294                 IWM_RX_RADIOTAP_PRESENT);
6295         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6296             "->%s end\n", __func__);
6297 }
6298
6299 static struct ieee80211vap *
6300 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6301     enum ieee80211_opmode opmode, int flags,
6302     const uint8_t bssid[IEEE80211_ADDR_LEN],
6303     const uint8_t mac[IEEE80211_ADDR_LEN])
6304 {
6305         struct iwm_vap *ivp;
6306         struct ieee80211vap *vap;
6307
6308         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6309                 return NULL;
6310         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6311         vap = &ivp->iv_vap;
6312         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6313         vap->iv_bmissthreshold = 10;            /* override default */
6314         /* Override with driver methods. */
6315         ivp->iv_newstate = vap->iv_newstate;
6316         vap->iv_newstate = iwm_newstate;
6317
6318         ieee80211_ratectl_init(vap);
6319         /* Complete setup. */
6320         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6321             mac);
6322         ic->ic_opmode = opmode;
6323
6324         return vap;
6325 }
6326
6327 static void
6328 iwm_vap_delete(struct ieee80211vap *vap)
6329 {
6330         struct iwm_vap *ivp = IWM_VAP(vap);
6331
6332         ieee80211_ratectl_deinit(vap);
6333         ieee80211_vap_detach(vap);
6334         kfree(ivp, M_80211_VAP);
6335 }
6336
/*
 * net80211 ic_scan_start method: submit a scan to the firmware, using
 * the UMAC scan command when the firmware advertises that capability
 * and the older LMAC command otherwise.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		/* Drop the driver lock before calling back into net80211. */
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6364
/*
 * net80211 ic_scan_end method: stop LED blinking, make sure the
 * firmware scan is fully stopped, and clear the scan-running flag.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* Restore the steady "associated" LED state if we are running. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6393
/* net80211 ic_update_mcast method: multicast filtering not implemented. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6398
/* net80211 ic_set_channel method: intentionally a no-op. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6403
/*
 * net80211 ic_scan_curchan method: no per-channel work here; the whole
 * scan is submitted to the firmware in iwm_scan_start().
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6408
/* net80211 ic_scan_mindwell method: nothing to do (see iwm_scan_start()). */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6414
6415 void
6416 iwm_init_task(void *arg1)
6417 {
6418         struct iwm_softc *sc = arg1;
6419
6420         IWM_LOCK(sc);
6421         while (sc->sc_flags & IWM_FLAG_BUSY) {
6422 #if defined(__DragonFly__)
6423                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6424 #else
6425                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6426 #endif
6427 }
6428         sc->sc_flags |= IWM_FLAG_BUSY;
6429         iwm_stop(sc);
6430         if (sc->sc_ic.ic_nrunning > 0)
6431                 iwm_init(sc);
6432         sc->sc_flags &= ~IWM_FLAG_BUSY;
6433         wakeup(&sc->sc_flags);
6434         IWM_UNLOCK(sc);
6435 }
6436
6437 static int
6438 iwm_resume(device_t dev)
6439 {
6440         struct iwm_softc *sc = device_get_softc(dev);
6441         int do_reinit = 0;
6442
6443         /*
6444          * We disable the RETRY_TIMEOUT register (0x41) to keep
6445          * PCI Tx retries from interfering with C3 CPU state.
6446          */
6447         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6448         iwm_init_task(device_get_softc(dev));
6449
6450         IWM_LOCK(sc);
6451         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6452                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6453                 do_reinit = 1;
6454         }
6455         IWM_UNLOCK(sc);
6456
6457         if (do_reinit)
6458                 ieee80211_resume_all(&sc->sc_ic);
6459
6460         return 0;
6461 }
6462
6463 static int
6464 iwm_suspend(device_t dev)
6465 {
6466         int do_stop = 0;
6467         struct iwm_softc *sc = device_get_softc(dev);
6468
6469         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6470
6471         ieee80211_suspend_all(&sc->sc_ic);
6472
6473         if (do_stop) {
6474                 IWM_LOCK(sc);
6475                 iwm_stop(sc);
6476                 sc->sc_flags |= IWM_FLAG_SCANNING;
6477                 IWM_UNLOCK(sc);
6478         }
6479
6480         return (0);
6481 }
6482
/*
 * Common teardown for both device detach and failed attach.
 *
 * do_net80211: non-zero when net80211 was fully attached and must be
 * detached too; attach-failure paths pass 0.  Returns 0 always.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Guard against double teardown (e.g. attach failure then detach). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	/* Stop timers before tearing down what their handlers touch. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drop any still-queued tx mbufs, then destroy the driver lock. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6538
6539 static int
6540 iwm_detach(device_t dev)
6541 {
6542         struct iwm_softc *sc = device_get_softc(dev);
6543
6544         return (iwm_detach_local(sc, 1));
6545 }
6546
/* Newbus method table wiring the driver into the PCI bus framework. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
/* The module needs firmware(9) image loading, the PCI bus and net80211. */
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);