1630ca7b5932f73563d23882aacc85b2131d572c
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150
151 #include <machine/endian.h>
152
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155
156 #include <net/bpf.h>
157
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_sf.h"
189 #include "if_iwm_sta.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192 #include "if_iwm_fw.h"
193
/*
 * Valid NVM channel numbers for pre-8000-series devices (7260/7265/3160
 * class hardware).  Indexes into this table come from the NVM channel
 * flag words; the 2.4 GHz entries occupy the first IWM_NUM_2GHZ_CHANNELS
 * slots, the remainder are 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM parsing code sizes its arrays with IWM_NUM_CHANNELS. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
204
/*
 * Valid NVM channel numbers for 8000-series devices, which support a
 * wider 5 GHz channel set than the older parts.  Layout matches
 * iwm_nvm_channels: 2.4 GHz channels first, then 5 GHz.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The 8000-series NVM parsing code sizes its arrays with this constant. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
215
216 #define IWM_NUM_2GHZ_CHANNELS   14
217 #define IWM_N_HW_ADDR_MASK      0xF
218
219 /*
220  * XXX For now, there's simply a fixed set of rate table entries
221  * that are populated.
222  */
/*
 * Legacy (non-HT) rate table mapping driver rate indices to hardware
 * PLCP signal values.  CCK rates come first, then OFDM; the IWM_RIDX_*
 * macros below encode that split.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kbit/s (e.g. 2 == 1 Mbit/s) */
	uint8_t plcp;	/* PLCP signal value the firmware expects */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* First CCK entry, first OFDM entry, and classification helpers. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
245
/* One NVM section as read back from the device EEPROM/OTP. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* kmalloc'd section contents */
};

/* Timeouts (in ticks) for the "alive" and calibration-done notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero if the alive notification was sane */
	uint32_t scd_base_addr;	/* scheduler SRAM base reported by firmware */
};
258
259 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
260 static int      iwm_firmware_store_section(struct iwm_softc *,
261                                            enum iwm_ucode_type,
262                                            const uint8_t *, size_t);
263 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
264 static void     iwm_fw_info_free(struct iwm_fw_info *);
265 static int      iwm_read_firmware(struct iwm_softc *);
266 #if !defined(__DragonFly__)
267 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
268 #endif
269 static int      iwm_alloc_fwmem(struct iwm_softc *);
270 static int      iwm_alloc_sched(struct iwm_softc *);
271 static int      iwm_alloc_kw(struct iwm_softc *);
272 static int      iwm_alloc_ict(struct iwm_softc *);
273 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
276 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
277                                   int);
278 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
280 static void     iwm_enable_interrupts(struct iwm_softc *);
281 static void     iwm_restore_interrupts(struct iwm_softc *);
282 static void     iwm_disable_interrupts(struct iwm_softc *);
283 static void     iwm_ict_reset(struct iwm_softc *);
284 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
285 static void     iwm_stop_device(struct iwm_softc *);
286 static void     iwm_mvm_nic_config(struct iwm_softc *);
287 static int      iwm_nic_rx_init(struct iwm_softc *);
288 static int      iwm_nic_tx_init(struct iwm_softc *);
289 static int      iwm_nic_init(struct iwm_softc *);
290 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
291 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292                                    uint16_t, uint8_t *, uint16_t *);
293 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294                                      uint16_t *, uint32_t);
295 static uint32_t iwm_eeprom_channel_flags(uint16_t);
296 static void     iwm_add_channel_band(struct iwm_softc *,
297                     struct ieee80211_channel[], int, int *, int, size_t,
298                     const uint8_t[]);
299 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
300                     struct ieee80211_channel[]);
301 static struct iwm_nvm_data *
302         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
303                            const uint16_t *, const uint16_t *,
304                            const uint16_t *, const uint16_t *,
305                            const uint16_t *);
306 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
307 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
308                                                struct iwm_nvm_data *,
309                                                const uint16_t *,
310                                                const uint16_t *);
311 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312                             const uint16_t *);
313 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
314 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315                                   const uint16_t *);
316 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
317                                    const uint16_t *);
318 static void     iwm_set_radio_cfg(const struct iwm_softc *,
319                                   struct iwm_nvm_data *, uint32_t);
320 static struct iwm_nvm_data *
321         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
322 static int      iwm_nvm_init(struct iwm_softc *);
323 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
324                                       const struct iwm_fw_desc *);
325 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
326                                              bus_addr_t, uint32_t);
327 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
328                                                 const struct iwm_fw_img *,
329                                                 int, int *);
330 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
331                                            const struct iwm_fw_img *,
332                                            int, int *);
333 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
334                                                const struct iwm_fw_img *);
335 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
336                                           const struct iwm_fw_img *);
337 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
338 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
339 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
340 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
341                                               enum iwm_ucode_type);
342 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
343 static int      iwm_mvm_config_ltr(struct iwm_softc *sc);
344 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
345 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
346                                             struct iwm_rx_phy_info *);
347 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
348                                       struct iwm_rx_packet *);
349 static int      iwm_get_noise(struct iwm_softc *,
350                     const struct iwm_mvm_statistics_rx_non_phy *);
351 static void     iwm_mvm_handle_rx_statistics(struct iwm_softc *,
352                     struct iwm_rx_packet *);
353 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
354                                     uint32_t, boolean_t);
355 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
356                                          struct iwm_rx_packet *,
357                                          struct iwm_node *);
358 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
359 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
360 #if 0
361 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
362                                  uint16_t);
363 #endif
364 static uint8_t  iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
365                         struct mbuf *, struct iwm_tx_cmd *);
366 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
367                        struct ieee80211_node *, int);
368 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
369                              const struct ieee80211_bpf_params *);
370 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
371 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
372 static struct ieee80211_node *
373                 iwm_node_alloc(struct ieee80211vap *,
374                                const uint8_t[IEEE80211_ADDR_LEN]);
375 static uint8_t  iwm_rate_from_ucode_rate(uint32_t);
376 static int      iwm_rate2ridx(struct iwm_softc *, uint8_t);
377 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
378 static int      iwm_media_change(struct ifnet *);
379 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
380 static void     iwm_endscan_cb(void *, int);
381 static int      iwm_send_bt_init_conf(struct iwm_softc *);
382 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
383 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
384 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
385 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
386 static int      iwm_init_hw(struct iwm_softc *);
387 static void     iwm_init(struct iwm_softc *);
388 static void     iwm_start(struct iwm_softc *);
389 static void     iwm_stop(struct iwm_softc *);
390 static void     iwm_watchdog(void *);
391 static void     iwm_parent(struct ieee80211com *);
392 #ifdef IWM_DEBUG
393 static const char *
394                 iwm_desc_lookup(uint32_t);
395 static void     iwm_nic_error(struct iwm_softc *);
396 static void     iwm_nic_umac_error(struct iwm_softc *);
397 #endif
398 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
399 static void     iwm_notif_intr(struct iwm_softc *);
400 static void     iwm_intr(void *);
401 static int      iwm_attach(device_t);
402 static int      iwm_is_valid_ether_addr(uint8_t *);
403 static void     iwm_preinit(void *);
404 static int      iwm_detach_local(struct iwm_softc *sc, int);
405 static void     iwm_init_task(void *);
406 static void     iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408                 iwm_vap_create(struct ieee80211com *,
409                                const char [IFNAMSIZ], int,
410                                enum ieee80211_opmode, int,
411                                const uint8_t [IEEE80211_ADDR_LEN],
412                                const uint8_t [IEEE80211_ADDR_LEN]);
413 static void     iwm_vap_delete(struct ieee80211vap *);
414 static void     iwm_xmit_queue_drain(struct iwm_softc *);
415 static void     iwm_scan_start(struct ieee80211com *);
416 static void     iwm_scan_end(struct ieee80211com *);
417 static void     iwm_update_mcast(struct ieee80211com *);
418 static void     iwm_set_channel(struct ieee80211com *);
419 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
420 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
421 static int      iwm_detach(device_t);
422
423 #if defined(__DragonFly__)
424 static int      iwm_msi_enable = 1;
425
426 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
427 #endif
428
429 static int      iwm_lar_disable = 0;
430 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
431
432 /*
433  * Firmware parser.
434  */
435
436 static int
437 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
438 {
439         const struct iwm_fw_cscheme_list *l = (const void *)data;
440
441         if (dlen < sizeof(*l) ||
442             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
443                 return EINVAL;
444
445         /* we don't actually store anything for now, always use s/w crypto */
446
447         return 0;
448 }
449
450 static int
451 iwm_firmware_store_section(struct iwm_softc *sc,
452     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
453 {
454         struct iwm_fw_img *fws;
455         struct iwm_fw_desc *fwone;
456
457         if (type >= IWM_UCODE_TYPE_MAX)
458                 return EINVAL;
459         if (dlen < sizeof(uint32_t))
460                 return EINVAL;
461
462         fws = &sc->sc_fw.img[type];
463         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
464                 return EINVAL;
465
466         fwone = &fws->sec[fws->fw_count];
467
468         /* first 32bit are device load offset */
469         memcpy(&fwone->offset, data, sizeof(uint32_t));
470
471         /* rest is data */
472         fwone->data = data + sizeof(uint32_t);
473         fwone->len = dlen - sizeof(uint32_t);
474
475         fws->fw_count++;
476
477         return 0;
478 }
479
/* Fallback scan-channel count when the firmware TLVs don't provide one. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* On-the-wire layout of an IWM_UCODE_TLV_DEF_CALIB section. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
486
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
489 {
490         const struct iwm_tlv_calib_data *def_calib = data;
491         uint32_t ucode_type = le32toh(def_calib->ucode_type);
492
493         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494                 device_printf(sc->sc_dev,
495                     "Wrong ucode_type %u for default "
496                     "calibration.\n", ucode_type);
497                 return EINVAL;
498         }
499
500         sc->sc_default_calib[ucode_type].flow_trigger =
501             def_calib->calib.flow_trigger;
502         sc->sc_default_calib[ucode_type].event_trigger =
503             def_calib->calib.event_trigger;
504
505         return 0;
506 }
507
508 static int
509 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
510                         struct iwm_ucode_capabilities *capa)
511 {
512         const struct iwm_ucode_api *ucode_api = (const void *)data;
513         uint32_t api_index = le32toh(ucode_api->api_index);
514         uint32_t api_flags = le32toh(ucode_api->api_flags);
515         int i;
516
517         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
518                 device_printf(sc->sc_dev,
519                     "api flags index %d larger than supported by driver\n",
520                     api_index);
521                 /* don't return an error so we can load FW that has more bits */
522                 return 0;
523         }
524
525         for (i = 0; i < 32; i++) {
526                 if (api_flags & (1U << i))
527                         setbit(capa->enabled_api, i + 32 * api_index);
528         }
529
530         return 0;
531 }
532
533 static int
534 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
535                            struct iwm_ucode_capabilities *capa)
536 {
537         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
538         uint32_t api_index = le32toh(ucode_capa->api_index);
539         uint32_t api_flags = le32toh(ucode_capa->api_capa);
540         int i;
541
542         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
543                 device_printf(sc->sc_dev,
544                     "capa flags index %d larger than supported by driver\n",
545                     api_index);
546                 /* don't return an error so we can load FW that has more bits */
547                 return 0;
548         }
549
550         for (i = 0; i < 32; i++) {
551                 if (api_flags & (1U << i))
552                         setbit(capa->enabled_capa, i + 32 * api_index);
553         }
554
555         return 0;
556 }
557
/*
 * Release the firmware image obtained via firmware_get() and clear the
 * per-type section tables, which alias the released image data.
 * Other fields of *fw (e.g. the parsed capabilities) are left intact.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* img[] pointers referenced fw_fp->data and are now stale. */
	memset(fw->img, 0, sizeof(fw->img));
}
565
566 static int
567 iwm_read_firmware(struct iwm_softc *sc)
568 {
569         struct iwm_fw_info *fw = &sc->sc_fw;
570         const struct iwm_tlv_ucode_header *uhdr;
571         const struct iwm_ucode_tlv *tlv;
572         struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
573         enum iwm_ucode_tlv_type tlv_type;
574         const struct firmware *fwp;
575         const uint8_t *data;
576         uint32_t tlv_len;
577         uint32_t usniffer_img;
578         const uint8_t *tlv_data;
579         uint32_t paging_mem_size;
580         int num_of_cpus;
581         int error = 0;
582         size_t len;
583
584         /*
585          * Load firmware into driver memory.
586          * fw_fp will be set.
587          */
588         fwp = firmware_get(sc->cfg->fw_name);
589         if (fwp == NULL) {
590                 device_printf(sc->sc_dev,
591                     "could not read firmware %s (error %d)\n",
592                     sc->cfg->fw_name, error);
593                 goto out;
594         }
595         fw->fw_fp = fwp;
596
597         /* (Re-)Initialize default values. */
598         capa->flags = 0;
599         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
600         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
601         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
602         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
603         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
604
605         /*
606          * Parse firmware contents
607          */
608
609         uhdr = (const void *)fw->fw_fp->data;
610         if (*(const uint32_t *)fw->fw_fp->data != 0
611             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
612                 device_printf(sc->sc_dev, "invalid firmware %s\n",
613                     sc->cfg->fw_name);
614                 error = EINVAL;
615                 goto out;
616         }
617
618         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
619             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
620             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
621             IWM_UCODE_API(le32toh(uhdr->ver)));
622         data = uhdr->data;
623         len = fw->fw_fp->datasize - sizeof(*uhdr);
624
625         while (len >= sizeof(*tlv)) {
626                 len -= sizeof(*tlv);
627                 tlv = (const void *)data;
628
629                 tlv_len = le32toh(tlv->length);
630                 tlv_type = le32toh(tlv->type);
631                 tlv_data = tlv->data;
632
633                 if (len < tlv_len) {
634                         device_printf(sc->sc_dev,
635                             "firmware too short: %zu bytes\n",
636                             len);
637                         error = EINVAL;
638                         goto parse_out;
639                 }
640                 len -= roundup2(tlv_len, 4);
641                 data += sizeof(tlv) + roundup2(tlv_len, 4);
642
643                 switch ((int)tlv_type) {
644                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
645                         if (tlv_len != sizeof(uint32_t)) {
646                                 device_printf(sc->sc_dev,
647                                     "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
648                                     __func__, tlv_len);
649                                 error = EINVAL;
650                                 goto parse_out;
651                         }
652                         capa->max_probe_length =
653                             le32_to_cpup((const uint32_t *)tlv_data);
654                         /* limit it to something sensible */
655                         if (capa->max_probe_length >
656                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
657                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
658                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
659                                     "ridiculous\n", __func__);
660                                 error = EINVAL;
661                                 goto parse_out;
662                         }
663                         break;
664                 case IWM_UCODE_TLV_PAN:
665                         if (tlv_len) {
666                                 device_printf(sc->sc_dev,
667                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
668                                     __func__, tlv_len);
669                                 error = EINVAL;
670                                 goto parse_out;
671                         }
672                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
673                         break;
674                 case IWM_UCODE_TLV_FLAGS:
675                         if (tlv_len < sizeof(uint32_t)) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
678                                     __func__, tlv_len);
679                                 error = EINVAL;
680                                 goto parse_out;
681                         }
682                         if (tlv_len % sizeof(uint32_t)) {
683                                 device_printf(sc->sc_dev,
684                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
685                                     __func__, tlv_len);
686                                 error = EINVAL;
687                                 goto parse_out;
688                         }
689                         /*
690                          * Apparently there can be many flags, but Linux driver
691                          * parses only the first one, and so do we.
692                          *
693                          * XXX: why does this override IWM_UCODE_TLV_PAN?
694                          * Intentional or a bug?  Observations from
695                          * current firmware file:
696                          *  1) TLV_PAN is parsed first
697                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
698                          * ==> this resets TLV_PAN to itself... hnnnk
699                          */
700                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
701                         break;
702                 case IWM_UCODE_TLV_CSCHEME:
703                         if ((error = iwm_store_cscheme(sc,
704                             tlv_data, tlv_len)) != 0) {
705                                 device_printf(sc->sc_dev,
706                                     "%s: iwm_store_cscheme(): returned %d\n",
707                                     __func__, error);
708                                 goto parse_out;
709                         }
710                         break;
711                 case IWM_UCODE_TLV_NUM_OF_CPU:
712                         if (tlv_len != sizeof(uint32_t)) {
713                                 device_printf(sc->sc_dev,
714                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
715                                     __func__, tlv_len);
716                                 error = EINVAL;
717                                 goto parse_out;
718                         }
719                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
720                         if (num_of_cpus == 2) {
721                                 fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
722                                         TRUE;
723                                 fw->img[IWM_UCODE_INIT].is_dual_cpus =
724                                         TRUE;
725                                 fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
726                                         TRUE;
727                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
728                                 device_printf(sc->sc_dev,
729                                     "%s: Driver supports only 1 or 2 CPUs\n",
730                                     __func__);
731                                 error = EINVAL;
732                                 goto parse_out;
733                         }
734                         break;
735                 case IWM_UCODE_TLV_SEC_RT:
736                         if ((error = iwm_firmware_store_section(sc,
737                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
738                                 device_printf(sc->sc_dev,
739                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
740                                     __func__, error);
741                                 goto parse_out;
742                         }
743                         break;
744                 case IWM_UCODE_TLV_SEC_INIT:
745                         if ((error = iwm_firmware_store_section(sc,
746                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
747                                 device_printf(sc->sc_dev,
748                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
749                                     __func__, error);
750                                 goto parse_out;
751                         }
752                         break;
753                 case IWM_UCODE_TLV_SEC_WOWLAN:
754                         if ((error = iwm_firmware_store_section(sc,
755                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
756                                 device_printf(sc->sc_dev,
757                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
758                                     __func__, error);
759                                 goto parse_out;
760                         }
761                         break;
762                 case IWM_UCODE_TLV_DEF_CALIB:
763                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764                                 device_printf(sc->sc_dev,
765                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
766                                     __func__, tlv_len,
767                                     sizeof(struct iwm_tlv_calib_data));
768                                 error = EINVAL;
769                                 goto parse_out;
770                         }
771                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
772                                 device_printf(sc->sc_dev,
773                                     "%s: iwm_set_default_calib() failed: %d\n",
774                                     __func__, error);
775                                 goto parse_out;
776                         }
777                         break;
778                 case IWM_UCODE_TLV_PHY_SKU:
779                         if (tlv_len != sizeof(uint32_t)) {
780                                 error = EINVAL;
781                                 device_printf(sc->sc_dev,
782                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
783                                     __func__, tlv_len);
784                                 goto parse_out;
785                         }
786                         sc->sc_fw.phy_config =
787                             le32_to_cpup((const uint32_t *)tlv_data);
788                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
789                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
790                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
791                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
792                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
793                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
794                         break;
795
796                 case IWM_UCODE_TLV_API_CHANGES_SET: {
797                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
798                                 error = EINVAL;
799                                 goto parse_out;
800                         }
801                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
802                                 error = EINVAL;
803                                 goto parse_out;
804                         }
805                         break;
806                 }
807
808                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
809                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
810                                 error = EINVAL;
811                                 goto parse_out;
812                         }
813                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
814                                 error = EINVAL;
815                                 goto parse_out;
816                         }
817                         break;
818                 }
819
820                 case 48: /* undocumented TLV */
821                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
822                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
823                         /* ignore, not used by current driver */
824                         break;
825
826                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
827                         if ((error = iwm_firmware_store_section(sc,
828                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
829                             tlv_len)) != 0)
830                                 goto parse_out;
831                         break;
832
833                 case IWM_UCODE_TLV_PAGING:
834                         if (tlv_len != sizeof(uint32_t)) {
835                                 error = EINVAL;
836                                 goto parse_out;
837                         }
838                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
839
840                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
841                             "%s: Paging: paging enabled (size = %u bytes)\n",
842                             __func__, paging_mem_size);
843                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
844                                 device_printf(sc->sc_dev,
845                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
846                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
847                                 error = EINVAL;
848                                 goto out;
849                         }
850                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
851                                 device_printf(sc->sc_dev,
852                                     "%s: Paging: image isn't multiple %u\n",
853                                     __func__, IWM_FW_PAGING_SIZE);
854                                 error = EINVAL;
855                                 goto out;
856                         }
857
858                         sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
859                             paging_mem_size;
860                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
861                         sc->sc_fw.img[usniffer_img].paging_mem_size =
862                             paging_mem_size;
863                         break;
864
865                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
866                         if (tlv_len != sizeof(uint32_t)) {
867                                 error = EINVAL;
868                                 goto parse_out;
869                         }
870                         capa->n_scan_channels =
871                             le32_to_cpup((const uint32_t *)tlv_data);
872                         break;
873
874                 case IWM_UCODE_TLV_FW_VERSION:
875                         if (tlv_len != sizeof(uint32_t) * 3) {
876                                 error = EINVAL;
877                                 goto parse_out;
878                         }
879                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
880                             "%d.%d.%d",
881                             le32toh(((const uint32_t *)tlv_data)[0]),
882                             le32toh(((const uint32_t *)tlv_data)[1]),
883                             le32toh(((const uint32_t *)tlv_data)[2]));
884                         break;
885
886                 case IWM_UCODE_TLV_FW_MEM_SEG:
887                         break;
888
889                 default:
890                         device_printf(sc->sc_dev,
891                             "%s: unknown firmware section %d, abort\n",
892                             __func__, tlv_type);
893                         error = EINVAL;
894                         goto parse_out;
895                 }
896         }
897
898         KASSERT(error == 0, ("unhandled error"));
899
900  parse_out:
901         if (error) {
902                 device_printf(sc->sc_dev, "firmware parse error %d, "
903                     "section type %d\n", error, tlv_type);
904         }
905
906  out:
907         if (error) {
908                 if (fw->fw_fp != NULL)
909                         iwm_fw_info_free(fw);
910         }
911
912         return error;
913 }
914
915 /*
916  * DMA resource routines
917  */
918
919 /* fwmem is used to load firmware onto the card */
920 static int
921 iwm_alloc_fwmem(struct iwm_softc *sc)
922 {
923         /* Must be aligned on a 16-byte boundary. */
924         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
925             IWM_FH_MEM_TB_MAX_LENGTH, 16);
926 }
927
928 /* tx scheduler rings.  not used? */
929 static int
930 iwm_alloc_sched(struct iwm_softc *sc)
931 {
932         /* TX scheduler rings must be aligned on a 1KB boundary. */
933         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
934             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
935 }
936
937 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
938 static int
939 iwm_alloc_kw(struct iwm_softc *sc)
940 {
941         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
942 }
943
944 /* interrupt cause table */
945 static int
946 iwm_alloc_ict(struct iwm_softc *sc)
947 {
948         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
949             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
950 }
951
/*
 * Allocate everything the RX ring needs: the descriptor array, the
 * status area, a buffer DMA tag with one map per slot (plus a spare
 * map for iwm_rx_addbuf()), and an initial receive buffer in every
 * slot.  On any failure the partially built ring is torn down via
 * iwm_free_rx_ring().  Returns 0 or a bus_dma/errno error code.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /*
         * Allocate RX descriptors (256-byte aligned).  Each descriptor
         * is a single 32-bit word per ring slot.
         */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /*
         * Create RX buffer DMA tag.  Buffers must live below 4GB
         * (BUS_SPACE_MAXADDR_32BIT) and fit in a single segment.
         *
         * NOTE(review): the DragonFly branch requests PAGE_SIZE
         * alignment where the stock branch uses 1 — presumably a
         * deliberate local choice; confirm against DragonFly bus_dma.
         */
#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
                                   0,
                                   BUS_SPACE_MAXADDR_32BIT,
                                   BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
                                   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                /* Attach an initial receive buffer to this slot. */
                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}
1032
1033 static void
1034 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1035 {
1036         /* Reset the ring state */
1037         ring->cur = 0;
1038
1039         /*
1040          * The hw rx ring index in shared memory must also be cleared,
1041          * otherwise the discrepancy can cause reprocessing chaos.
1042          */
1043         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1044 }
1045
1046 static void
1047 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1048 {
1049         int i;
1050
1051         iwm_dma_contig_free(&ring->desc_dma);
1052         iwm_dma_contig_free(&ring->stat_dma);
1053
1054         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1055                 struct iwm_rx_data *data = &ring->data[i];
1056
1057                 if (data->m != NULL) {
1058                         bus_dmamap_sync(ring->data_dmat, data->map,
1059                             BUS_DMASYNC_POSTREAD);
1060                         bus_dmamap_unload(ring->data_dmat, data->map);
1061                         m_freem(data->m);
1062                         data->m = NULL;
1063                 }
1064                 if (data->map != NULL) {
1065                         bus_dmamap_destroy(ring->data_dmat, data->map);
1066                         data->map = NULL;
1067                 }
1068         }
1069         if (ring->spare_map != NULL) {
1070                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1071                 ring->spare_map = NULL;
1072         }
1073         if (ring->data_dmat != NULL) {
1074                 bus_dma_tag_destroy(ring->data_dmat);
1075                 ring->data_dmat = NULL;
1076         }
1077 }
1078
/*
 * Allocate a TX ring.  Every queue gets a TFD descriptor array; the
 * data queues up to and including the command queue additionally get
 * a device-command buffer area, a buffer DMA tag and per-slot maps,
 * with each slot's command/scratch physical addresses precomputed.
 * On failure the partial ring is released via iwm_free_tx_ring().
 * Returns 0 or a bus_dma/errno error code.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                nsegments = IWM_MAX_SCATTER - 2;
        }

#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
                                   0,
                                   BUS_SPACE_MAXADDR_32BIT,
                                   BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   maxsize, nsegments, maxsize,
                                   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        /*
         * Walk the command buffer area, recording for each slot the
         * physical address of its device command and of the scratch
         * field inside the TX command that follows the header.
         */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* After the walk, paddr must sit exactly at the end of the area. */
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1168
/*
 * Return the TX ring to an empty state without freeing resources:
 * drop any queued mbufs, zero the descriptor array, clear the queue's
 * bit in qfullmsk and reset the software indices.  DMA maps and tags
 * are kept (contrast iwm_free_tx_ring()).  For the command queue,
 * also clear the "command in flight" NIC-awake state if set.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        /* Clear TX descriptors. */
        memset(ring->desc, 0, ring->desc_dma.size);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);
        sc->qfullmsk &= ~(1 << ring->qid);
        ring->queued = 0;
        ring->cur = 0;

        if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
                iwm_pcie_clear_cmd_in_flight(sc);
}
1196
1197 static void
1198 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1199 {
1200         int i;
1201
1202         iwm_dma_contig_free(&ring->desc_dma);
1203         iwm_dma_contig_free(&ring->cmd_dma);
1204
1205         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1206                 struct iwm_tx_data *data = &ring->data[i];
1207
1208                 if (data->m != NULL) {
1209                         bus_dmamap_sync(ring->data_dmat, data->map,
1210                             BUS_DMASYNC_POSTWRITE);
1211                         bus_dmamap_unload(ring->data_dmat, data->map);
1212                         m_freem(data->m);
1213                         data->m = NULL;
1214                 }
1215                 if (data->map != NULL) {
1216                         bus_dmamap_destroy(ring->data_dmat, data->map);
1217                         data->map = NULL;
1218                 }
1219         }
1220         if (ring->data_dmat != NULL) {
1221                 bus_dma_tag_destroy(ring->data_dmat);
1222                 ring->data_dmat = NULL;
1223         }
1224 }
1225
1226 /*
1227  * High-level hardware frobbing routines
1228  */
1229
1230 static void
1231 iwm_enable_interrupts(struct iwm_softc *sc)
1232 {
1233         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1234         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1235 }
1236
1237 static void
1238 iwm_restore_interrupts(struct iwm_softc *sc)
1239 {
1240         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1241 }
1242
/*
 * Mask all device interrupts, then ACK anything still pending in both
 * the main CSR and the FH (flow handler) status registers so no stale
 * cause bits survive.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1253
/*
 * Re-initialize the interrupt cause table (ICT): disable interrupts,
 * zero the table, program its physical address into the hardware,
 * mark the driver as using ICT mode, and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts (ACK everything pending first). */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}
1277
1278 /*
1279  * Since this .. hard-resets things, it's time to actually
1280  * mark the first vap (if any) as having no mac context.
1281  * It's annoying, but since the driver is potentially being
1282  * stop/start'ed whilst active (thanks openbsd port!) we
1283  * have to correctly track this.
1284  */
1285 static void
1286 iwm_stop_device(struct iwm_softc *sc)
1287 {
1288         struct ieee80211com *ic = &sc->sc_ic;
1289         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1290         int chnl, qid;
1291         uint32_t mask = 0;
1292
1293         /* tell the device to stop sending interrupts */
1294         iwm_disable_interrupts(sc);
1295
1296         /*
1297          * FreeBSD-local: mark the first vap as not-uploaded,
1298          * so the next transition through auth/assoc
1299          * will correctly populate the MAC context.
1300          */
1301         if (vap) {
1302                 struct iwm_vap *iv = IWM_VAP(vap);
1303                 iv->phy_ctxt = NULL;
1304                 iv->is_uploaded = 0;
1305         }
1306         sc->sc_firmware_state = 0;
1307         sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1308
1309         /* device going down, Stop using ICT table */
1310         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1311
1312         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1313
1314         if (iwm_nic_lock(sc)) {
1315                 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1316
1317                 /* Stop each Tx DMA channel */
1318                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1319                         IWM_WRITE(sc,
1320                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1321                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1322                 }
1323
1324                 /* Wait for DMA channels to be idle */
1325                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1326                     5000)) {
1327                         device_printf(sc->sc_dev,
1328                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1329                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1330                 }
1331                 iwm_nic_unlock(sc);
1332         }
1333         iwm_pcie_rx_stop(sc);
1334
1335         /* Stop RX ring. */
1336         iwm_reset_rx_ring(sc, &sc->rxq);
1337
1338         /* Reset all TX rings. */
1339         for (qid = 0; qid < nitems(sc->txq); qid++)
1340                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1341
1342         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1343                 /* Power-down device's busmaster DMA clocks */
1344                 if (iwm_nic_lock(sc)) {
1345                         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1346                             IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1347                         iwm_nic_unlock(sc);
1348                 }
1349                 DELAY(5);
1350         }
1351
1352         /* Make sure (redundant) we've released our request to stay awake */
1353         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1354             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1355
1356         /* Stop the device, and put it in low power state */
1357         iwm_apm_stop(sc);
1358
1359         /* stop and reset the on-board processor */
1360         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1361         DELAY(1000);
1362
1363         /*
1364          * Upon stop, the APM issues an interrupt if HW RF kill is set.
1365          * This is a bug in certain verions of the hardware.
1366          * Certain devices also keep sending HW RF kill interrupt all
1367          * the time, unless the interrupt is ACKed even if the interrupt
1368          * should be masked. Re-ACK all the interrupts here.
1369          */
1370         iwm_disable_interrupts(sc);
1371
1372         /*
1373          * Even if we stop the HW, we still want the RF kill
1374          * interrupt
1375          */
1376         iwm_enable_rfkill_int(sc);
1377         iwm_check_rfkill(sc);
1378 }
1379
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from two sources: the MAC
 * step/dash extracted from the hardware revision (sc_hw_rev) and the
 * radio type/step/dash extracted from the PHY configuration returned
 * by iwm_mvm_get_phy_config().  Also applies a 7000-family workaround
 * for the early-PCIe-power-off reset problem described below.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
        uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
        uint32_t reg_val = 0;
        uint32_t phy_config = iwm_mvm_get_phy_config(sc);

        /* Unpack the radio configuration fields from phy_config. */
        radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
            IWM_FW_PHY_CFG_RADIO_TYPE_POS;
        radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
            IWM_FW_PHY_CFG_RADIO_STEP_POS;
        radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
            IWM_FW_PHY_CFG_RADIO_DASH_POS;

        /* SKU control */
        reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
        reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

        /* radio configuration */
        reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
        reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
        reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

        IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
            radio_cfg_step, radio_cfg_dash);

        /*
         * W/A : NIC is stuck in a reset state after Early PCIe power off
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
                    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
                    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
        }
}
1422
/*
 * Program the hardware RX engine: clear the status area, stop RX DMA,
 * reset the channel-0 pointer registers, load the ring and status
 * physical addresses, configure the RX DMA channel and interrupt
 * coalescing, and push an initial write pointer of 8.  Returns EBUSY
 * if the NIC lock cannot be taken, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
        /* Force serialization (probably not needed but don't trust the HW) */
        IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1491
/*
 * Initialize the TX side of the NIC.
 *
 * Deactivates the TX scheduler, programs the "keep warm" page address,
 * loads every TX ring's descriptor base into the CBBC queue registers,
 * and puts the scheduler into auto-active mode.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1526
/*
 * Bring up the NIC: APM init, power settings (7000 family only),
 * MVM-specific NIC configuration, then RX and TX engine init.
 * Finally enables shadow registers so some CSR writes do not require
 * waking up the NIC.
 *
 * Returns 0 on success or the error from the RX/TX init step.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1553
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is configured directly through
 * the scheduler's PRPH registers and SRAM context; any other queue is
 * configured by sending an IWM_SCD_QUEUE_CFG command to the firmware on
 * behalf of station 'sta_id'.
 *
 * Returns 0 on success, EBUSY if the NIC lock cannot be (re)taken, or
 * the error from the firmware command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer to index 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* The command queue never aggregates frames. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Zero the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Activate the queue and route it to its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		/* Let the firmware configure a regular data queue. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): IWM_SCD_EN_CTRL is ORed with the raw queue number
	 * here.  Other drivers treat this register as a bitmask of enabled
	 * queues, which would make "(1 << qid)" the intended value — verify
	 * against iwlwifi/OpenBSD before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1641
/*
 * Post-"alive" TX scheduler bring-up.
 *
 * Resets the ICT table, records the scheduler SRAM base address (sanity
 * checked against 'scd_base_addr' reported by the firmware, if non-zero),
 * clears the scheduler context/translation memory, points the scheduler
 * at the DRAM byte-count tables, enables the command queue, activates
 * all queues via IWM_SCD_TXFACT, enables the FH TX DMA channels, and
 * re-enables L1-Active on pre-8000 devices.
 *
 * Returns 0 on success or EBUSY on NIC-lock/SRAM-write failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;	/* NB: write errors are flattened to EBUSY */

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* 'error' is 0 here: every failure path above returned early. */
	return error;
}
1712
1713 /*
1714  * NVM read access and content parsing.  We do not support
1715  * external NVM or writing NVM.
1716  * iwlwifi/mvm/nvm.c
1717  */
1718
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* op_code values for the IWM_NVM_ACCESS_CMD firmware command */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of iwm_nvm_access_resp) */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1730
/*
 * Read one chunk (at most 'length' bytes) of NVM section 'section' at
 * word offset 'offset' by sending an IWM_NVM_ACCESS_CMD to the firmware.
 *
 * On success the response payload is copied to data + offset and *len is
 * set to the number of bytes received.  A NOT_VALID_ADDRESS status at a
 * non-zero offset marks the end of the section and is not an error:
 * *len is set to 0 and 0 is returned.
 *
 * Returns 0 on success or an errno on command/response failure.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Response must echo the offset we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	/* Never copy more than the caller's buffer can hold. */
	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1817
1818 /*
1819  * Reads an NVM section completely.
1820  * NICs prior to 7000 family don't have a real NVM, but just read
1821  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1822  * by uCode, we need to manually check in this case that we don't
1823  * overflow and try to read more than the EEPROM size.
1824  * For 7000 family NICs, we supply the maximal size we can read, and
1825  * the uCode fills the response with as much data as we can,
1826  * without overflowing, so no check is needed.
1827  */
1828 static int
1829 iwm_nvm_read_section(struct iwm_softc *sc,
1830         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1831 {
1832         uint16_t seglen, length, offset = 0;
1833         int ret;
1834
1835         /* Set nvm section read length */
1836         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1837
1838         seglen = length;
1839
1840         /* Read the NVM until exhausted (reading less than requested) */
1841         while (seglen == length) {
1842                 /* Check no memory assumptions fail and cause an overflow */
1843                 if ((size_read + offset + length) >
1844                     sc->cfg->eeprom_size) {
1845                         device_printf(sc->sc_dev,
1846                             "EEPROM size is too small for NVM\n");
1847                         return ENOBUFS;
1848                 }
1849
1850                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1851                 if (ret) {
1852                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1853                                     "Cannot read NVM from section %d offset %d, length %d\n",
1854                                     section, offset, length);
1855                         return ret;
1856                 }
1857                 offset += seglen;
1858         }
1859
1860         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1861                     "NVM section %d read completed\n", section);
1862         *len = offset;
1863         return 0;
1864 }
1865
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address, little-endian 16-bit words */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	/* The following are relative to the start of the SW section. */
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1883
/*
 * Family-8000 NVM layout.  Offsets are in 16-bit words and, except for
 * the absolute section bases, relative to the start of their section.
 */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,	/* NVM versions < 0xE39 */
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1909
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
/* Pre-8000 layout: fields decoded from the 16-bit radio config word. */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 layout: fields decoded from the 32-bit radio config word. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1932
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Only VALID, IBSS, ACTIVE and RADAR are currently consumed by
 * iwm_eeprom_channel_flags()/iwm_add_channel_band(); note bit 2 is
 * unused.
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1957
1958 /*
1959  * Translate EEPROM flags to net80211.
1960  */
1961 static uint32_t
1962 iwm_eeprom_channel_flags(uint16_t ch_flags)
1963 {
1964         uint32_t nflags;
1965
1966         nflags = 0;
1967         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1968                 nflags |= IEEE80211_CHAN_PASSIVE;
1969         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1970                 nflags |= IEEE80211_CHAN_NOADHOC;
1971         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1972                 nflags |= IEEE80211_CHAN_DFS;
1973                 /* Just in case. */
1974                 nflags |= IEEE80211_CHAN_NOADHOC;
1975         }
1976
1977         return (nflags);
1978 }
1979
/*
 * Register one band's worth of NVM channels with net80211.
 *
 * Walks NVM channel entries [ch_idx, ch_num), skipping entries whose
 * VALID flag is clear, translating the remaining flags via
 * iwm_eeprom_channel_flags(), and adding each channel to 'chans' for
 * every mode set in 'bands'.  Stops early when ieee80211_add_channel()
 * reports an error (e.g. 'maxchans' exhausted); *nchans is updated by
 * that function as channels are added.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The channel-number table differs per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2020
/*
 * net80211 callback: build the channel list from the parsed NVM data.
 * 2 GHz channels 1-13 are added as 11b/g, channel 14 as 11b only, and -
 * when the SKU enables the 5 GHz band - the remaining NVM entries as 11a.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* Channel tables differ in length per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2053
/*
 * Determine the MAC address on family-8000 devices.
 *
 * Preference order: the NVM MAC-override section (used verbatim unless
 * it holds the reserved, broadcast, multicast or otherwise invalid
 * address), then the WFMP_MAC_ADDR PRPH registers (byte-swapped into
 * data->hw_addr).  If neither source yields an address, data->hw_addr
 * is zeroed and an error is printed.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVMs carry in the MAO section. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Registers hold the address in reversed byte order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2112
2113 static int
2114 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2115             const uint16_t *phy_sku)
2116 {
2117         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2118                 return le16_to_cpup(nvm_sw + IWM_SKU);
2119
2120         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2121 }
2122
2123 static int
2124 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2125 {
2126         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2128         else
2129                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2130                                                 IWM_NVM_VERSION_8000));
2131 }
2132
2133 static int
2134 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2135                   const uint16_t *phy_sku)
2136 {
2137         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2138                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2139
2140         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2141 }
2142
2143 static int
2144 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2145 {
2146         int n_hw_addr;
2147
2148         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2149                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2150
2151         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2152
2153         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2154 }
2155
2156 static void
2157 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2158                   uint32_t radio_cfg)
2159 {
2160         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2161                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2162                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2163                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2164                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2165                 return;
2166         }
2167
2168         /* set the radio configuration for family 8000 */
2169         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2170         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2171         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2172         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2173         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2174         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2175 }
2176
/*
 * Extract the MAC address from the NVM into data->hw_addr.
 * Pre-8000 devices store it in the HW section as little-endian 16-bit
 * words; family 8000 is handled by iwm_set_hw_address_family_8000().
 * Returns 0 on success or EINVAL if no valid address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2207
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data.
 *
 * Decodes the NVM version, radio configuration, SKU capabilities,
 * hardware address count, LAR state (family 8000 only) and the MAC
 * address, then copies the per-channel flag table out of the SW
 * (pre-8000) or regulatory (8000) section.
 *
 * Returns the parsed data (caller owns it; release with
 * iwm_free_nvm_data()) or NULL when no valid MAC address was found.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* The trailing nvm_ch_flags array is sized per device family. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		data = kmalloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	}
	/* M_WAITOK allocations should not fail; this check is defensive. */
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* The LAR word moved between NVM versions. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		kfree(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2268
2269 static void
2270 iwm_free_nvm_data(struct iwm_nvm_data *data)
2271 {
2272         if (data != NULL)
2273                 kfree(data, M_DEVBUF);
2274 }
2275
2276 static struct iwm_nvm_data *
2277 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2278 {
2279         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2280
2281         /* Checking for required sections */
2282         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2283                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2284                     !sections[sc->cfg->nvm_hw_section_num].data) {
2285                         device_printf(sc->sc_dev,
2286                             "Can't parse empty OTP/NVM sections\n");
2287                         return NULL;
2288                 }
2289         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2290                 /* SW and REGULATORY sections are mandatory */
2291                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2292                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2293                         device_printf(sc->sc_dev,
2294                             "Can't parse empty OTP/NVM sections\n");
2295                         return NULL;
2296                 }
2297                 /* MAC_OVERRIDE or at least HW section must exist */
2298                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2299                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2300                         device_printf(sc->sc_dev,
2301                             "Can't parse mac_address, empty sections\n");
2302                         return NULL;
2303                 }
2304
2305                 /* PHY_SKU section is mandatory in B0 */
2306                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2307                         device_printf(sc->sc_dev,
2308                             "Can't parse phy_sku in B0, empty sections\n");
2309                         return NULL;
2310                 }
2311         } else {
2312                 panic("unknown device family %d\n", sc->cfg->device_family);
2313         }
2314
2315         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2316         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2317         calib = (const uint16_t *)
2318             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2319         regulatory = (const uint16_t *)
2320             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2321         mac_override = (const uint16_t *)
2322             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2323         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2324
2325         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2326             phy_sku, regulatory);
2327 }
2328
2329 static int
2330 iwm_nvm_init(struct iwm_softc *sc)
2331 {
2332         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2333         int i, ret, section;
2334         uint32_t size_read = 0;
2335         uint8_t *nvm_buffer, *temp;
2336         uint16_t len;
2337
2338         memset(nvm_sections, 0, sizeof(nvm_sections));
2339
2340         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2341                 return EINVAL;
2342
2343         /* load NVM values from nic */
2344         /* Read From FW NVM */
2345         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2346
2347         nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2348             M_INTWAIT | M_ZERO);
2349         if (!nvm_buffer)
2350                 return ENOMEM;
2351         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2352                 /* we override the constness for initial read */
2353                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2354                                            &len, size_read);
2355                 if (ret)
2356                         continue;
2357                 size_read += len;
2358                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2359                 if (!temp) {
2360                         ret = ENOMEM;
2361                         break;
2362                 }
2363                 memcpy(temp, nvm_buffer, len);
2364
2365                 nvm_sections[section].data = temp;
2366                 nvm_sections[section].length = len;
2367         }
2368         if (!size_read)
2369                 device_printf(sc->sc_dev, "OTP is blank\n");
2370         kfree(nvm_buffer, M_DEVBUF);
2371
2372         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2373         if (!sc->nvm_data)
2374                 return EINVAL;
2375         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2376                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2377
2378         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2379                 if (nvm_sections[i].data != NULL)
2380                         kfree(nvm_sections[i].data, M_DEVBUF);
2381         }
2382
2383         return 0;
2384 }
2385
2386 static int
2387 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2388         const struct iwm_fw_desc *section)
2389 {
2390         struct iwm_dma_info *dma = &sc->fw_dma;
2391         uint8_t *v_addr;
2392         bus_addr_t p_addr;
2393         uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2394         int ret = 0;
2395
2396         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2397                     "%s: [%d] uCode section being loaded...\n",
2398                     __func__, section_num);
2399
2400         v_addr = dma->vaddr;
2401         p_addr = dma->paddr;
2402
2403         for (offset = 0; offset < section->len; offset += chunk_sz) {
2404                 uint32_t copy_size, dst_addr;
2405                 int extended_addr = FALSE;
2406
2407                 copy_size = MIN(chunk_sz, section->len - offset);
2408                 dst_addr = section->offset + offset;
2409
2410                 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2411                     dst_addr <= IWM_FW_MEM_EXTENDED_END)
2412                         extended_addr = TRUE;
2413
2414                 if (extended_addr)
2415                         iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2416                                           IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2417
2418                 memcpy(v_addr, (const uint8_t *)section->data + offset,
2419                     copy_size);
2420                 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2421                 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2422                                                    copy_size);
2423
2424                 if (extended_addr)
2425                         iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2426                                             IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2427
2428                 if (ret) {
2429                         device_printf(sc->sc_dev,
2430                             "%s: Could not load the [%d] uCode section\n",
2431                             __func__, section_num);
2432                         break;
2433                 }
2434         }
2435
2436         return ret;
2437 }
2438
2439 /*
2440  * ucode
2441  */
/*
 * Program the FH service DMA channel to copy one firmware chunk from host
 * memory at phy_addr into device SRAM at dst_addr, then sleep until the
 * transfer completes.
 *
 * sc_fw_chunk_done is set by the interrupt path (not visible in this
 * file section) when the FH_TX completion fires.  Returns 0 on success,
 * EBUSY if the NIC cannot be locked, ETIMEDOUT if no completion arrives
 * within ~5 seconds per sleep.
 *
 * NOTE: the register write sequence below (pause channel, program
 * destination/source/length, validate the TB, enable the channel) is
 * order-sensitive — do not reorder.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Cleared here; the completion interrupt sets it again. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the service channel while the transfer is programmed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low bits of the host DMA address... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ...high bits of the DMA address, plus the byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* A single transfer buffer, marked valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the DMA. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2499
2500 static int
2501 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2502         const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2503 {
2504         int shift_param;
2505         int i, ret = 0, sec_num = 0x1;
2506         uint32_t val, last_read_idx = 0;
2507
2508         if (cpu == 1) {
2509                 shift_param = 0;
2510                 *first_ucode_section = 0;
2511         } else {
2512                 shift_param = 16;
2513                 (*first_ucode_section)++;
2514         }
2515
2516         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2517                 last_read_idx = i;
2518
2519                 /*
2520                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2521                  * CPU1 to CPU2.
2522                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2523                  * CPU2 non paged to CPU2 paging sec.
2524                  */
2525                 if (!image->sec[i].data ||
2526                     image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2527                     image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2528                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2529                                     "Break since Data not valid or Empty section, sec = %d\n",
2530                                     i);
2531                         break;
2532                 }
2533                 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2534                 if (ret)
2535                         return ret;
2536
2537                 /* Notify the ucode of the loaded section number and status */
2538                 if (iwm_nic_lock(sc)) {
2539                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2540                         val = val | (sec_num << shift_param);
2541                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2542                         sec_num = (sec_num << 1) | 0x1;
2543                         iwm_nic_unlock(sc);
2544                 }
2545         }
2546
2547         *first_ucode_section = last_read_idx;
2548
2549         iwm_enable_interrupts(sc);
2550
2551         if (iwm_nic_lock(sc)) {
2552                 if (cpu == 1)
2553                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2554                 else
2555                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2556                 iwm_nic_unlock(sc);
2557         }
2558
2559         return 0;
2560 }
2561
2562 static int
2563 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2564         const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2565 {
2566         int shift_param;
2567         int i, ret = 0;
2568         uint32_t last_read_idx = 0;
2569
2570         if (cpu == 1) {
2571                 shift_param = 0;
2572                 *first_ucode_section = 0;
2573         } else {
2574                 shift_param = 16;
2575                 (*first_ucode_section)++;
2576         }
2577
2578         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2579                 last_read_idx = i;
2580
2581                 /*
2582                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2583                  * CPU1 to CPU2.
2584                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2585                  * CPU2 non paged to CPU2 paging sec.
2586                  */
2587                 if (!image->sec[i].data ||
2588                     image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2589                     image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2590                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2591                                     "Break since Data not valid or Empty section, sec = %d\n",
2592                                      i);
2593                         break;
2594                 }
2595
2596                 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2597                 if (ret)
2598                         return ret;
2599         }
2600
2601         *first_ucode_section = last_read_idx;
2602
2603         return 0;
2604
2605 }
2606
2607 static int
2608 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2609 {
2610         int ret = 0;
2611         int first_ucode_section;
2612
2613         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2614                      image->is_dual_cpus ? "Dual" : "Single");
2615
2616         /* load to FW the binary non secured sections of CPU1 */
2617         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2618         if (ret)
2619                 return ret;
2620
2621         if (image->is_dual_cpus) {
2622                 /* set CPU2 header address */
2623                 if (iwm_nic_lock(sc)) {
2624                         iwm_write_prph(sc,
2625                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2626                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2627                         iwm_nic_unlock(sc);
2628                 }
2629
2630                 /* load to FW the binary sections of CPU2 */
2631                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2632                                                  &first_ucode_section);
2633                 if (ret)
2634                         return ret;
2635         }
2636
2637         iwm_enable_interrupts(sc);
2638
2639         /* release CPU reset */
2640         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2641
2642         return 0;
2643 }
2644
2645 int
2646 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2647         const struct iwm_fw_img *image)
2648 {
2649         int ret = 0;
2650         int first_ucode_section;
2651
2652         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2653                     image->is_dual_cpus ? "Dual" : "Single");
2654
2655         /* configure the ucode to be ready to get the secured image */
2656         /* release CPU reset */
2657         if (iwm_nic_lock(sc)) {
2658                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2659                     IWM_RELEASE_CPU_RESET_BIT);
2660                 iwm_nic_unlock(sc);
2661         }
2662
2663         /* load to FW the binary Secured sections of CPU1 */
2664         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2665             &first_ucode_section);
2666         if (ret)
2667                 return ret;
2668
2669         /* load to FW the binary sections of CPU2 */
2670         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2671             &first_ucode_section);
2672 }
2673
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is needed to observe firmware
 * chunk DMA completions while the image is being loaded.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	/* Cache the mask in the softc, then program the CSR mask register. */
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2682
2683 /* XXX Add proper rfkill support code */
2684 static int
2685 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2686 {
2687         int ret;
2688
2689         /* This may fail if AMT took ownership of the device */
2690         if (iwm_prepare_card_hw(sc)) {
2691                 device_printf(sc->sc_dev,
2692                     "%s: Exit HW not ready\n", __func__);
2693                 ret = EIO;
2694                 goto out;
2695         }
2696
2697         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2698
2699         iwm_disable_interrupts(sc);
2700
2701         /* make sure rfkill handshake bits are cleared */
2702         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2704             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2705
2706         /* clear (again), then enable host interrupts */
2707         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2708
2709         ret = iwm_nic_init(sc);
2710         if (ret) {
2711                 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2712                 goto out;
2713         }
2714
2715         /*
2716          * Now, we load the firmware and don't want to be interrupted, even
2717          * by the RF-Kill interrupt (hence mask all the interrupt besides the
2718          * FH_TX interrupt which is needed to load the firmware). If the
2719          * RF-Kill switch is toggled, we will find out after having loaded
2720          * the firmware and return the proper value to the caller.
2721          */
2722         iwm_enable_fw_load_int(sc);
2723
2724         /* really make sure rfkill handshake bits are cleared */
2725         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2726         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2727
2728         /* Load the given image to the HW */
2729         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2730                 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2731         else
2732                 ret = iwm_pcie_load_given_ucode(sc, fw);
2733
2734         /* XXX re-check RF-Kill state */
2735
2736 out:
2737         return ret;
2738 }
2739
2740 static int
2741 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2742 {
2743         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2744                 .valid = htole32(valid_tx_ant),
2745         };
2746
2747         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2748             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2749 }
2750
2751 static int
2752 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2753 {
2754         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2755         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2756
2757         /* Set parameters */
2758         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2759         phy_cfg_cmd.calib_control.event_trigger =
2760             sc->sc_default_calib[ucode_type].event_trigger;
2761         phy_cfg_cmd.calib_control.flow_trigger =
2762             sc->sc_default_calib[ucode_type].flow_trigger;
2763
2764         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2765             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2766         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2767             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2768 }
2769
/*
 * Notification-wait callback for the firmware's ALIVE response.
 *
 * Three response layouts exist (ver1, ver2 and the current
 * struct iwm_mvm_alive_resp); they are distinguished purely by payload
 * length.  For the matching layout, the error/log event table pointers
 * and the scheduler base address are extracted into the softc and
 * alive_data, and alive_data->valid records whether the ucode reported
 * IWM_ALIVE_STATUS_OK.  If the payload matches none of the layouts,
 * alive_data is left untouched.  Always returns TRUE so the waiter is
 * woken.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* VER1 responses carry no UMAC error log address. */
		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address enables UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address enables UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2846
2847 static int
2848 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2849         struct iwm_rx_packet *pkt, void *data)
2850 {
2851         struct iwm_phy_db *phy_db = data;
2852
2853         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2854                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2855                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2856                             __func__, pkt->hdr.code);
2857                 }
2858                 return TRUE;
2859         }
2860
2861         if (iwm_phy_db_set_section(phy_db, pkt)) {
2862                 device_printf(sc->sc_dev,
2863                     "%s: iwm_phy_db_set_section failed\n", __func__);
2864         }
2865
2866         return FALSE;
2867 }
2868
/*
 * Load the requested ucode image into the device and block until the
 * firmware's ALIVE notification arrives.
 *
 * On success sc->cur_ucode is left at ucode_type and sc->ucode_loaded is
 * set; on any failure sc->cur_ucode is restored to its previous value.
 * For images with a paging section, the FW paging mechanism is saved and
 * configured after the ALIVE handshake.
 *
 * Called with the IWM lock held; the lock is dropped around the
 * notification wait.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_img *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	fw = &sc->sc_fw.img[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before the firmware can respond. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, NELEM(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* No notification will arrive; drop the waiter ourselves. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			/* Sentinel defaults in case the NIC can't be locked. */
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	/* ALIVE arrived but the ucode did not report IWM_ALIVE_STATUS_OK. */
	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2957
2958 /*
2959  * mvm misc bits
2960  */
2961
/*
 * Run the INIT ucode and perform the init-time calibration handshake.
 *
 * With justnvm set, only the NVM is read (to obtain the MAC address) and
 * no calibration commands are sent; note that the justnvm success path
 * deliberately exits through the "error" label, because the calibration
 * notification waiter registered below must be removed in that case too.
 *
 * Returns 0 on success, EPERM when rfkill is asserted (and !justnvm),
 * or the error of the failing step.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Collect PHY DB entries until IWM_INIT_COMPLETE_NOTIF arrives. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   NELEM(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Success path — still must remove the calib_wait waiter. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3049
3050 static int
3051 iwm_mvm_config_ltr(struct iwm_softc *sc)
3052 {
3053         struct iwm_ltr_config_cmd cmd = {
3054                 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3055         };
3056
3057         if (!sc->sc_ltr_enabled)
3058                 return 0;
3059
3060         return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3061 }
3062
3063 /*
3064  * receive side
3065  */
3066
/*
 * (re)stock rx ring, called at init-time and at runtime.
 *
 * Allocates a fresh receive mbuf for slot 'idx', DMA-maps it, and points
 * the hardware RX descriptor at it.  Returns 0 on success or an errno;
 * on failure the slot's previous buffer/mapping is left untouched.
 *
 * XXX the 'size' parameter is unused; IWM_RBUF_SIZE is used
 * unconditionally for the cluster allocation.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first, so that on
	 * failure the slot's existing mapping (if any) remains intact.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	/* Tear down the slot's old mapping before recycling its map. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The descriptor holds the 256-byte-aligned bus address >> 8. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3116
3117 /*
3118  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3119  * values are reported by the fw as positive values - need to negate
3120  * to obtain their dBM.  Account for missing antennas by replacing 0
3121  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3122  */
3123 static int
3124 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3125 {
3126         int energy_a, energy_b, energy_c, max_energy;
3127         uint32_t val;
3128
3129         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3130         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3131             IWM_RX_INFO_ENERGY_ANT_A_POS;
3132         energy_a = energy_a ? -energy_a : -256;
3133         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3134             IWM_RX_INFO_ENERGY_ANT_B_POS;
3135         energy_b = energy_b ? -energy_b : -256;
3136         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3137             IWM_RX_INFO_ENERGY_ANT_C_POS;
3138         energy_c = energy_c ? -energy_c : -256;
3139         max_energy = MAX(energy_a, energy_b);
3140         max_energy = MAX(max_energy, energy_c);
3141
3142         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3143             "energy In A %d B %d C %d , and max %d\n",
3144             energy_a, energy_b, energy_c, max_energy);
3145
3146         return max_energy;
3147 }
3148
3149 static void
3150 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3151 {
3152         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3153
3154         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3155
3156         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3157 }
3158
3159 /*
3160  * Retrieve the average noise (in dBm) among receivers.
3161  */
3162 static int
3163 iwm_get_noise(struct iwm_softc *sc,
3164         const struct iwm_mvm_statistics_rx_non_phy *stats)
3165 {
3166         int i, total, nbant, noise;
3167
3168         total = nbant = noise = 0;
3169         for (i = 0; i < 3; i++) {
3170                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3171                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3172                     __func__, i, noise);
3173
3174                 if (noise) {
3175                         total += noise;
3176                         nbant++;
3177                 }
3178         }
3179
3180         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3181             __func__, nbant, total);
3182 #if 0
3183         /* There should be at least one antenna but check anyway. */
3184         return (nbant == 0) ? -127 : (total / nbant) - 107;
3185 #else
3186         /* For now, just hard-code it to -96 to be safe */
3187         return (-96);
3188 #endif
3189 }
3190
3191 static void
3192 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3193 {
3194         struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3195
3196         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3197         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3198 }
3199
/*
 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw.  Uses the PHY
 * info cached by iwm_mvm_rx_rx_phy_cmd() for signal-strength, channel
 * and radiotap information.  Returns TRUE when the mbuf was handed to
 * net80211 (consumed), FALSE when the caller should drop/recycle it.
 */
static boolean_t
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
	boolean_t stolen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_rx_stats rxs;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	/* PHY info was cached by the preceding RX_PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word sits just past the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		device_printf(sc->sc_dev,
		    "dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt);
		return FALSE;
	}

	/* Drop frames that failed the CRC or FIFO-overrun checks. */
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		return FALSE; /* drop */
	}

	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	/* Note: RSSI is absolute (ie a -ve value) */
	if (rssi < IWM_MIN_DBM)
		rssi = IWM_MIN_DBM;
	else if (rssi > IWM_MAX_DBM)
		rssi = IWM_MAX_DBM;

	/* Map it to relative value */
	rssi = rssi - sc->sc_noise;

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
		    __func__);
		return FALSE;
	}

	/* Re-point the mbuf at the 802.11 frame inside the RX packet. */
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
	    "%s: phy_info: channel=%d, flags=0x%08x\n",
	    __func__,
	    le16toh(phy_info->channel),
	    le16toh(phy_info->phy_flags));

	/*
	 * Populate an RX state struct with the provided information.
	 */
	bzero(&rxs, sizeof(rxs));
	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.c_ieee = le16toh(phy_info->channel);
	/*
	 * NOTE(review): le16toh() is applied to the already-masked value
	 * here (mask inside the conversion), unlike the radiotap test
	 * below which masks against htole16() -- confirm this is
	 * intentional and correct on big-endian hosts.
	 */
	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
	} else {
		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
	}
	/* rssi is in 1/2db units */
	rxs.rssi = rssi * 2;
	rxs.nf = sc->sc_noise;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq = htole16(rxs.c_freq);
		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Translate the firmware rate code to a radiotap rate. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWM_UNLOCK(sc);
	if (ni != NULL) {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
		ieee80211_input_mimo(ni, m, &rxs);
		ieee80211_free_node(ni);
	} else {
		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
		ieee80211_input_mimo_all(ic, m, &rxs);
	}
	IWM_LOCK(sc);

	return TRUE;
}
3333
3334 static int
3335 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3336         struct iwm_node *in)
3337 {
3338         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3339         struct ieee80211_node *ni = &in->in_ni;
3340         struct ieee80211vap *vap = ni->ni_vap;
3341         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3342         int failack = tx_resp->failure_frame;
3343         int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3344         boolean_t rate_matched;
3345         uint8_t tx_resp_rate;
3346         int ret;
3347
3348         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3349
3350         /* Update rate control statistics. */
3351         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3352             __func__,
3353             (int) le16toh(tx_resp->status.status),
3354             (int) le16toh(tx_resp->status.sequence),
3355             tx_resp->frame_count,
3356             tx_resp->bt_kill_count,
3357             tx_resp->failure_rts,
3358             tx_resp->failure_frame,
3359             le32toh(tx_resp->initial_rate),
3360             (int) le16toh(tx_resp->wireless_media_time));
3361
3362         tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3363
3364         /* For rate control, ignore frames sent at different initial rate */
3365         rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3366
3367         if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3368                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3369                     "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3370                     "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3371         }
3372
3373         if (status != IWM_TX_STATUS_SUCCESS &&
3374             status != IWM_TX_STATUS_DIRECT_DONE) {
3375                 if (rate_matched) {
3376                         ieee80211_ratectl_tx_complete(vap, ni,
3377                             IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3378                 }
3379                 ret = 1;
3380         } else {
3381                 if (rate_matched) {
3382                         ieee80211_ratectl_tx_complete(vap, ni,
3383                             IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3384                 }
3385                 ret = 0;
3386         }
3387
3388         if (rate_matched) {
3389                 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3390                 new_rate = vap->iv_bss->ni_txrate;
3391                 if (new_rate != 0 && new_rate != cur_rate) {
3392                         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3393                         iwm_setrates(sc, in, rix);
3394                         iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
3395                 }
3396         }
3397
3398         return ret;
3399 }
3400
/*
 * TX response dispatcher: locate the TX-ring slot named in the packet
 * header, report the result to rate control / net80211, release the
 * slot's DMA mapping and mbuf, and restart transmission if the ring
 * has drained below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion means the device is responsive: reset watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Mark the slot free before handing the mbuf off. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Non-zero status counts as a TX failure in net80211 stats. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unthrottle and restart output once all queues have room again. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3440
3441 /*
3442  * transmit side
3443  */
3444
3445 /*
3446  * Process a "command done" firmware notification.  This is where we wakeup
3447  * processes waiting for a synchronous command completion.
3448  * from if_iwn
3449  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	/* Only packets from the command queue are command completions. */
	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's descriptor. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Completions should arrive in ring order; warn if one skipped. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KKASSERT(ring->queued > 0);
	ring->queued--;
	/* Last in-flight command retired: release the cmd-in-flight hold. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3484
#if 0
/*
 * necessary only for block ack mode
 *
 * Mirror the byte count for TFD (qid, idx) into the TX scheduler's
 * byte-count table so the firmware scheduler knows the frame sizes.
 * Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	/* Add 8 bytes of headroom, then convert to 4-byte dwords. */
	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;

	/* Entry format: station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Low entries are duplicated past IWM_TFD_QUEUE_SIZE_MAX --
	 * presumably for firmware wrap-around reads; TODO confirm.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3516
3517 /*
3518  * Fill in the rate related information for a transmit command.
3519  */
/*
 * Select the TX rate for frame 'm' and fill the rate-related fields of
 * the TX command.  Management, multicast, fixed-rate and EAPOL frames
 * get an explicitly chosen rate; ordinary data frames are left to the
 * firmware's programmed rate table (IWM_TX_CMD_FLG_STA_RATE).
 * Returns the chosen net80211 rate (for radiotap).
 */
static uint8_t
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/* Pick a rate index based on frame type/destination. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_rate2ridx(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_rate2ridx(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* Send EAPOL at the (robust) management rate. */
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL (%d)\n", __func__, tp->mgmtrate);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		/* This is the index into the programmed table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA (%d)\n",
		    __func__, ni->ni_txrate);
		/* Firmware picks the rate; no rate_n_flags to fill. */
		return ni->ni_txrate;
	} else {
		ridx = iwm_rate2ridx(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: DEFAULT (%d)\n", __func__, tp->mgmtrate);
	}

	/*
	 * Sanity check ridx, and provide fallback. If the rate lookup
	 * ever fails, iwm_rate2ridx() will already print an error message.
	 */
	if (ridx < 0 || ridx > IWM_RIDX_MAX) {
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: frame type=%d, ridx=%d, rate=%d, CCK=%d\n",
	    __func__, type, ridx, rinfo->rate, !! (IWM_RIDX_IS_CCK(ridx)));

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo->rate;
}
3594
/* First TFD transfer buffer covers this much of the command header. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC queue 'ac': build the TX
 * command (with the 802.11 header copied into it), DMA-map the payload,
 * fill the TFD scatter/gather entries and kick the ring's write
 * pointer.  Consumes 'm' on error paths that return non-zero.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t rate, tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rate = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	/* Total frame length, captured before the header is trimmed. */
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Choose a power-management frame timeout by subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* Header travels inside the command; only payload is DMA-mapped. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0 and 1 cover the command+header; payload follows. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3826
3827 static int
3828 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3829     const struct ieee80211_bpf_params *params)
3830 {
3831         struct ieee80211com *ic = ni->ni_ic;
3832         struct iwm_softc *sc = ic->ic_softc;
3833         int error = 0;
3834
3835         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3836             "->%s begin\n", __func__);
3837
3838         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3839                 m_freem(m);
3840                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3841                     "<-%s not RUNNING\n", __func__);
3842                 return (ENETDOWN);
3843         }
3844
3845         IWM_LOCK(sc);
3846         /* XXX fix this */
3847         if (params == NULL) {
3848                 error = iwm_tx(sc, m, ni, 0);
3849         } else {
3850                 error = iwm_tx(sc, m, ni, 0);
3851         }
3852         if (sc->sc_tx_timer == 0)
3853                 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3854         sc->sc_tx_timer = 5;
3855         IWM_UNLOCK(sc);
3856
3857         return (error);
3858 }
3859
3860 /*
3861  * mvm/tx.c
3862  */
3863
3864 /*
3865  * Note that there are transports that buffer frames before they reach
3866  * the firmware. This means that after flush_tx_path is called, the
3867  * queue might not be empty. The race-free way to handle this is to:
3868  * 1) set the station as draining
3869  * 2) flush the Tx path
3870  * 3) wait for the transport queues to be empty
3871  */
3872 int
3873 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3874 {
3875         int ret;
3876         struct iwm_tx_path_flush_cmd flush_cmd = {
3877                 .queues_ctl = htole32(tfd_msk),
3878                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3879         };
3880
3881         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3882             sizeof(flush_cmd), &flush_cmd);
3883         if (ret)
3884                 device_printf(sc->sc_dev,
3885                     "Flushing tx queue failed: %d\n", ret);
3886         return ret;
3887 }
3888
3889 static int
3890 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3891 {
3892         struct iwm_time_quota_cmd cmd;
3893         int i, idx, ret, num_active_macs, quota, quota_rem;
3894         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3895         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3896         uint16_t id;
3897
3898         memset(&cmd, 0, sizeof(cmd));
3899
3900         /* currently, PHY ID == binding ID */
3901         if (ivp) {
3902                 id = ivp->phy_ctxt->id;
3903                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3904                 colors[id] = ivp->phy_ctxt->color;
3905
3906                 if (1)
3907                         n_ifs[id] = 1;
3908         }
3909
3910         /*
3911          * The FW's scheduling session consists of
3912          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3913          * equally between all the bindings that require quota
3914          */
3915         num_active_macs = 0;
3916         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3917                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3918                 num_active_macs += n_ifs[i];
3919         }
3920
3921         quota = 0;
3922         quota_rem = 0;
3923         if (num_active_macs) {
3924                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3925                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3926         }
3927
3928         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3929                 if (colors[i] < 0)
3930                         continue;
3931
3932                 cmd.quotas[idx].id_and_color =
3933                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3934
3935                 if (n_ifs[i] <= 0) {
3936                         cmd.quotas[idx].quota = htole32(0);
3937                         cmd.quotas[idx].max_duration = htole32(0);
3938                 } else {
3939                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3940                         cmd.quotas[idx].max_duration = htole32(0);
3941                 }
3942                 idx++;
3943         }
3944
3945         /* Give the remainder of the session to the first binding */
3946         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3947
3948         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3949             sizeof(cmd), &cmd);
3950         if (ret)
3951                 device_printf(sc->sc_dev,
3952                     "%s: Failed to send quota: %d\n", __func__, ret);
3953         return ret;
3954 }
3955
3956 /*
3957  * ieee80211 routines
3958  */
3959
3960 /*
3961  * Change to AUTH state in 80211 state machine.  Roughly matches what
3962  * Linux does in bss_info_changed().
3963  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);
	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
	    __func__, ether_sprintf(ni->ni_bssid));

	/* Not associated yet; mark auth in progress (cleared on error). */
	in->in_assoc = 0;
	iv->iv_auth = 1;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}
	/* Firmware state 1: MAC context exists in the firmware. */
	sc->sc_firmware_state = 1;

	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/* Firmware state 2: vif is bound to the PHY context. */
	sc->sc_firmware_state = 2;
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}
	/* Firmware state 3: station entry exists in the firmware. */
	sc->sc_firmware_state = 3;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);

	error = 0;
out:
	/* On any failure, auth is no longer in progress. */
	if (error != 0)
		iv->iv_auth = 0;
	ieee80211_free_node(ni);
	return (error);
}
4092
4093 static struct ieee80211_node *
4094 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4095 {
4096         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4097             M_INTWAIT | M_ZERO);
4098 }
4099
4100 static uint8_t
4101 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4102 {
4103         uint8_t plcp = rate_n_flags & 0xff;
4104         int i;
4105
4106         for (i = 0; i <= IWM_RIDX_MAX; i++) {
4107                 if (iwm_rates[i].plcp == plcp)
4108                         return iwm_rates[i].rate;
4109         }
4110         return 0;
4111 }
4112
4113 uint8_t
4114 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4115 {
4116         int i;
4117         uint8_t rval;
4118
4119         for (i = 0; i < rs->rs_nrates; i++) {
4120                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4121                 if (rval == iwm_rates[ridx].rate)
4122                         return rs->rs_rates[i];
4123         }
4124
4125         return 0;
4126 }
4127
4128 static int
4129 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4130 {
4131         int i;
4132
4133         for (i = 0; i <= IWM_RIDX_MAX; i++) {
4134                 if (iwm_rates[i].rate == rate)
4135                         return i;
4136         }
4137
4138         device_printf(sc->sc_dev,
4139             "%s: WARNING: device rate for %u not found!\n",
4140             __func__, rate);
4141
4142         return -1;
4143 }
4144
/*
 * Build the link-quality (rate selection) command for the given node,
 * with rix as the highest rate index to use.  The resulting in_lq is
 * sent to the firmware separately (see iwm_mvm_send_lq_cmd callers).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int nrates = rs->rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	KKASSERT(rix >= 0 && rix < nrates);

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}
	/* Only use rates up to and including the requested index. */
	nrates = imin(rix + 1, nrates);

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
		int nextant;

		/* Map 802.11 rate to HW rate index. */
		ridx = iwm_rate2ridx(sc, rate);
		if (ridx == -1)
			continue;

		/* Rotate through the valid tx antennas, one per entry. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4225
4226 static int
4227 iwm_media_change(struct ifnet *ifp)
4228 {
4229         struct ieee80211vap *vap = ifp->if_softc;
4230         struct ieee80211com *ic = vap->iv_ic;
4231         struct iwm_softc *sc = ic->ic_softc;
4232         int error;
4233
4234         error = ieee80211_media_change(ifp);
4235         if (error != ENETRESET)
4236                 return error;
4237
4238         IWM_LOCK(sc);
4239         if (ic->ic_nrunning > 0) {
4240                 iwm_stop(sc);
4241                 iwm_init(sc);
4242         }
4243         IWM_UNLOCK(sc);
4244         return error;
4245 }
4246
/*
 * Tear down firmware state for the given vap, stepping sc_firmware_state
 * back from 3 (station added) through 2 (binding added) and 1 (MAC context
 * added) down to 0.  Errors in individual steps are logged but do not
 * abort the remaining teardown.
 */
static void
iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	int error;

	/* Avoid Tx watchdog triggering, when transfers get dropped here. */
	sc->sc_tx_timer = 0;

	ivp->iv_auth = 0;
	if (sc->sc_firmware_state == 3) {
		/* Drain queued frames before the station goes away. */
		iwm_xmit_queue_drain(sc);
//		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
		error = iwm_mvm_rm_sta(sc, vap, TRUE);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove station: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_sf_update(sc, vap, FALSE);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to update smart FIFO: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_rm_sta_id(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove station id: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		/* NULL vap: drop all quota allocations. */
		error = iwm_mvm_update_quotas(sc, NULL);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to update PHY quota: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		/* XXX Might need to specify bssid correctly. */
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		/* Station state fully torn down; drop to binding level. */
		sc->sc_firmware_state = 2;
	}
	if (sc->sc_firmware_state > 1) {
		error = iwm_mvm_binding_remove_vif(sc, ivp);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove channel ctx: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state > 1) {
		/* Binding removed; only the MAC context remains. */
		sc->sc_firmware_state = 1;
	}
	ivp->phy_ctxt = NULL;
	if (sc->sc_firmware_state > 0) {
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state > 0) {
		error = iwm_mvm_power_update_mac(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update power management\n",
			    __func__);
		}
	}
	sc->sc_firmware_state = 0;
}
4341
/*
 * net80211 state-machine hook.  Performs the driver/firmware work for
 * each state transition and then chains to the saved iv_newstate().
 * Called with the IEEE80211 com lock held; we drop it in favour of the
 * driver lock while talking to the firmware.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s arg=0x%x\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate],
	    arg);

	/* Swap the com lock for the driver lock. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
	    (nstate == IEEE80211_S_AUTH ||
	     nstate == IEEE80211_S_ASSOC ||
	     nstate == IEEE80211_S_RUN)) {
		/* Stop blinking for a scan, when authenticating. */
		iwm_led_blink_stop(sc);
	}

	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
		iwm_mvm_led_disable(sc);
		/* disable beacon filtering if we're hopping out of RUN */
		iwm_mvm_disable_beacon_filter(sc);
		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;
	}

	/* Leaving AUTH/ASSOC/RUN ends the session-protection time event. */
	if ((vap->iv_state == IEEE80211_S_AUTH ||
	     vap->iv_state == IEEE80211_S_ASSOC ||
	     vap->iv_state == IEEE80211_S_RUN) &&
	    (nstate == IEEE80211_S_INIT ||
	     nstate == IEEE80211_S_SCAN ||
	     nstate == IEEE80211_S_AUTH)) {
		iwm_mvm_stop_session_protection(sc, ivp);
	}

	if ((vap->iv_state == IEEE80211_S_RUN ||
	     vap->iv_state == IEEE80211_S_ASSOC) &&
	    nstate == IEEE80211_S_INIT) {
		/*
		 * In this case, iv_newstate() wants to send an 80211 frame on
		 * the network that we are leaving. So we need to call it,
		 * before tearing down all the firmware state.
		 */
		IWM_UNLOCK(sc);
		IEEE80211_LOCK(ic);
		ivp->iv_newstate(vap, nstate, arg);
		IEEE80211_UNLOCK(ic);
		IWM_LOCK(sc);
		iwm_bring_down_firmware(sc, vap);
		IWM_UNLOCK(sc);
		IEEE80211_LOCK(ic);
		return 0;
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		/* Start from a clean firmware state, then set up for auth. */
		iwm_bring_down_firmware(sc, vap);
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			iwm_bring_down_firmware(sc, vap);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return 1;
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Pick an initial tx rate from the rate-control module. */
		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
		iwm_setrates(sc, in, rix);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4472
4473 void
4474 iwm_endscan_cb(void *arg, int pending)
4475 {
4476         struct iwm_softc *sc = arg;
4477         struct ieee80211com *ic = &sc->sc_ic;
4478
4479         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4480             "%s: scan ended\n",
4481             __func__);
4482
4483         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4484 }
4485
4486 static int
4487 iwm_send_bt_init_conf(struct iwm_softc *sc)
4488 {
4489         struct iwm_bt_coex_cmd bt_cmd;
4490
4491         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4492         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4493
4494         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4495             &bt_cmd);
4496 }
4497
4498 static boolean_t
4499 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4500 {
4501         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4502         boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4503                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4504
4505         if (iwm_lar_disable)
4506                 return FALSE;
4507
4508         /*
4509          * Enable LAR only if it is supported by the FW (TLV) &&
4510          * enabled in the NVM
4511          */
4512         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4513                 return nvm_lar && tlv_lar;
4514         else
4515                 return tlv_lar;
4516 }
4517
4518 static boolean_t
4519 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4520 {
4521         return fw_has_api(&sc->sc_fw.ucode_capa,
4522                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4523                fw_has_capa(&sc->sc_fw.ucode_capa,
4524                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4525 }
4526
/*
 * Send an MCC (mobile country code) update for the given two-letter
 * country code to the firmware; with IWM_DEBUG, also log the regulatory
 * domain reported back.  Returns 0 when LAR is unsupported (no-op).
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware with the V2 capability replies in the v2 layout. */
	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two country-code characters into one 16-bit field. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Command length depends on which command version the FW takes. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4599
4600 static void
4601 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4602 {
4603         struct iwm_host_cmd cmd = {
4604                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4605                 .len = { sizeof(uint32_t), },
4606                 .data = { &backoff, },
4607         };
4608
4609         if (iwm_send_cmd(sc, &cmd) != 0) {
4610                 device_printf(sc->sc_dev,
4611                     "failed to change thermal tx backoff\n");
4612         }
4613 }
4614
4615 static int
4616 iwm_init_hw(struct iwm_softc *sc)
4617 {
4618         struct ieee80211com *ic = &sc->sc_ic;
4619         int error, i, ac;
4620
4621         sc->sf_state = IWM_SF_UNINIT;
4622
4623         if ((error = iwm_start_hw(sc)) != 0) {
4624                 kprintf("iwm_start_hw: failed %d\n", error);
4625                 return error;
4626         }
4627
4628         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4629                 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4630                 return error;
4631         }
4632
4633         /*
4634          * should stop and start HW since that INIT
4635          * image just loaded
4636          */
4637         iwm_stop_device(sc);
4638         sc->sc_ps_disabled = FALSE;
4639         if ((error = iwm_start_hw(sc)) != 0) {
4640                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4641                 return error;
4642         }
4643
4644         /* omstart, this time with the regular firmware */
4645         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4646         if (error) {
4647                 device_printf(sc->sc_dev, "could not load firmware\n");
4648                 goto error;
4649         }
4650
4651         error = iwm_mvm_sf_update(sc, NULL, FALSE);
4652         if (error)
4653                 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4654
4655         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4656                 device_printf(sc->sc_dev, "bt init conf failed\n");
4657                 goto error;
4658         }
4659
4660         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4661         if (error != 0) {
4662                 device_printf(sc->sc_dev, "antenna config failed\n");
4663                 goto error;
4664         }
4665
4666         /* Send phy db control command and then phy db calibration */
4667         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4668                 goto error;
4669
4670         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4671                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4672                 goto error;
4673         }
4674
4675         /* Add auxiliary station for scanning */
4676         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4677                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4678                 goto error;
4679         }
4680
4681         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4682                 /*
4683                  * The channel used here isn't relevant as it's
4684                  * going to be overwritten in the other flows.
4685                  * For now use the first channel we have.
4686                  */
4687                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4688                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4689                         goto error;
4690         }
4691
4692         /* Initialize tx backoffs to the minimum. */
4693         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4694                 iwm_mvm_tt_tx_backoff(sc, 0);
4695
4696         if (iwm_mvm_config_ltr(sc) != 0)
4697                 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4698
4699         error = iwm_mvm_power_update_device(sc);
4700         if (error)
4701                 goto error;
4702
4703         if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4704                 goto error;
4705
4706         if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4707                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4708                         goto error;
4709         }
4710
4711         /* Enable Tx queues. */
4712         for (ac = 0; ac < WME_NUM_AC; ac++) {
4713                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4714                     iwm_mvm_ac_to_tx_fifo[ac]);
4715                 if (error)
4716                         goto error;
4717         }
4718
4719         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4720                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4721                 goto error;
4722         }
4723
4724         return 0;
4725
4726  error:
4727         iwm_stop_device(sc);
4728         return error;
4729 }
4730
4731 /* Allow multicast from our BSSID. */
4732 static int
4733 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4734 {
4735         struct ieee80211_node *ni = vap->iv_bss;
4736         struct iwm_mcast_filter_cmd *cmd;
4737         size_t size;
4738         int error;
4739
4740         size = roundup(sizeof(*cmd), 4);
4741         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4742         if (cmd == NULL)
4743                 return ENOMEM;
4744         cmd->filter_own = 1;
4745         cmd->port_id = 0;
4746         cmd->count = 0;
4747         cmd->pass_all = 1;
4748         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4749
4750         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4751             IWM_CMD_SYNC, size, cmd);
4752         kfree(cmd, M_DEVBUF);
4753
4754         return (error);
4755 }
4756
4757 /*
4758  * ifnet interfaces
4759  */
4760
4761 static void
4762 iwm_init(struct iwm_softc *sc)
4763 {
4764         int error;
4765
4766         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4767                 return;
4768         }
4769         sc->sc_generation++;
4770         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4771
4772         if ((error = iwm_init_hw(sc)) != 0) {
4773                 kprintf("iwm_init_hw failed %d\n", error);
4774                 iwm_stop(sc);
4775                 return;
4776         }
4777
4778         /*
4779          * Ok, firmware loaded and we are jogging
4780          */
4781         sc->sc_flags |= IWM_FLAG_HW_INITED;
4782 }
4783
4784 static int
4785 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4786 {
4787         struct iwm_softc *sc;
4788         int error;
4789
4790         sc = ic->ic_softc;
4791
4792         IWM_LOCK(sc);
4793         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4794                 IWM_UNLOCK(sc);
4795                 return (ENXIO);
4796         }
4797         error = mbufq_enqueue(&sc->sc_snd, m);
4798         if (error) {
4799                 IWM_UNLOCK(sc);
4800                 return (error);
4801         }
4802         iwm_start(sc);
4803         IWM_UNLOCK(sc);
4804         return (0);
4805 }
4806
4807 /*
4808  * Dequeue packets from sendq and call send.
4809  */
static void
iwm_start(struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;	/* XXX single TX path: access category is always 0 */

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	/* Drain the send queue while no hardware TX queue is full. */
	while (sc->qfullmsk == 0 &&
		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* net80211 stashes the node reference in m_pkthdr.rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			/* TX failed: count the error, release the node ref. */
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			continue;
		}
		/* Arm the watchdog when the first frame goes out. */
		if (sc->sc_tx_timer == 0) {
			callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
			    sc);
		}
		/* Watchdog decrements this once per second (see iwm_watchdog). */
		sc->sc_tx_timer = 15;
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
4835
static void
iwm_stop(struct iwm_softc *sc)
{

	/*
	 * Bring the interface down: clear HW_INITED first so other
	 * paths see the device as stopped, bump the generation
	 * (presumably so stale callbacks can notice the restart —
	 * confirm against users of sc_generation), stop the LED blink
	 * callout and TX watchdog, then halt the hardware itself.
	 */
	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4848
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;

	/* Bail out if the device was detached after the callout fired. */
	if (sc->sc_attached == 0)
		return;

	if (sc->sc_tx_timer > 0) {
		/*
		 * sc_tx_timer is (re)set to 15 by the TX path and
		 * decremented here once per second.  Reaching zero
		 * means no progress was made on a pending TX, so dump
		 * diagnostics and shut the device down.
		 */
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			iwm_stop(sc);
#if defined(__DragonFly__)
			++sc->sc_ic.ic_oerrors;
#else
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
#endif
			return;
		}
		/* Re-arm for the next one-second tick. */
		callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
	}
}
4874
4875 static void
4876 iwm_parent(struct ieee80211com *ic)
4877 {
4878         struct iwm_softc *sc = ic->ic_softc;
4879         int startall = 0;
4880
4881         IWM_LOCK(sc);
4882         if (ic->ic_nrunning > 0) {
4883                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4884                         iwm_init(sc);
4885                         startall = 1;
4886                 }
4887         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4888                 iwm_stop(sc);
4889         IWM_UNLOCK(sc);
4890         if (startall)
4891                 ieee80211_start_all(ic);
4892 }
4893
4894 /*
4895  * The interrupt side of things
4896  */
4897
4898 /*
4899  * error dumping routines are from iwlwifi/mvm/utils.c
4900  */
4901
4902 /*
4903  * Note: This structure is read from the device with IO accesses,
4904  * and the reading already does the endian conversion. As it is
4905  * read with uint32_t-sized accesses, any members with a different size
4906  * need to be ordered correctly though!
4907  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4955
4956 /*
4957  * UMAC error struct - relevant starting from family 8000 chip.
4958  * Note: This structure is read from the device with IO accesses,
4959  * and the reading already does the endian conversion. As it is
4960  * read with u32-sized accesses, any members with a different size
4961  * need to be ordered correctly though!
4962  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
4980
4981 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4982 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4983
4984 #ifdef IWM_DEBUG
/*
 * Firmware SYSASSERT error-id -> name lookup table.  The final entry
 * (num == 0, "ADVANCED_SYSASSERT") is the catch-all default returned
 * by iwm_desc_lookup() when no id matches.
 *
 * Made static const: the table is private to this file and read-only,
 * so it should not leak a symbol into the global kernel namespace.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5006
5007 static const char *
5008 iwm_desc_lookup(uint32_t num)
5009 {
5010         int i;
5011
5012         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5013                 if (advanced_lookup[i].num == num)
5014                         return advanced_lookup[i].name;
5015
5016         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5017         return advanced_lookup[i].name;
5018 }
5019
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	/* The firmware advertises the table location in SRAM. */
	base = sc->umac_error_event_table;

	/* Addresses below 0x800000 cannot be a valid UMAC error table.
	 * NOTE(review): bound mirrors the check in iwm_nic_error();
	 * confirm against the device memory map. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* NOTE(review): length argument appears to be a count of 32-bit
	 * words, hence the division — confirm iwm_read_mem()'s contract. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* Dump every field; values were endian-converted by the reader. */
	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5066
5067 /*
5068  * Support for dumping the error log seemed like a good idea ...
5069  * but it's mostly hex junk and the only sensible thing is the
5070  * hw/ucode revision (which we know anyway).  Since it's here,
5071  * I'll just leave it in, just in case e.g. the Intel guys want to
5072  * help us decipher some "ADVANCED_SYSASSERT" later.
5073  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	/* The firmware advertises the table location in SRAM. */
	base = sc->error_event_table;
	/* Addresses below 0x800000 cannot be a valid error table. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* NOTE(review): length argument appears to be a count of 32-bit
	 * words, hence the division — confirm iwm_read_mem()'s contract. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* valid == 0 means the firmware never logged an error. */
	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	/* Dump every field; values were endian-converted by the reader. */
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ chips additionally expose a UMAC error table. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5146 #endif
5147
5148 static void
5149 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5150 {
5151         struct ieee80211com *ic = &sc->sc_ic;
5152         struct iwm_cmd_response *cresp;
5153         struct mbuf *m1;
5154         uint32_t offset = 0;
5155         uint32_t maxoff = IWM_RBUF_SIZE;
5156         uint32_t nextoff;
5157         boolean_t stolen = FALSE;
5158
5159 #define HAVEROOM(a)     \
5160     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5161
5162         while (HAVEROOM(offset)) {
5163                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5164                     offset);
5165                 int qid, idx, code, len;
5166
5167                 qid = pkt->hdr.qid;
5168                 idx = pkt->hdr.idx;
5169
5170                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5171
5172                 /*
5173                  * randomly get these from the firmware, no idea why.
5174                  * they at least seem harmless, so just ignore them for now
5175                  */
5176                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5177                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5178                         break;
5179                 }
5180
5181                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5182                     "rx packet qid=%d idx=%d type=%x\n",
5183                     qid & ~0x80, pkt->hdr.idx, code);
5184
5185                 len = iwm_rx_packet_len(pkt);
5186                 len += sizeof(uint32_t); /* account for status word */
5187                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5188
5189                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5190
5191                 switch (code) {
5192                 case IWM_REPLY_RX_PHY_CMD:
5193                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5194                         break;
5195
5196                 case IWM_REPLY_RX_MPDU_CMD: {
5197                         /*
5198                          * If this is the last frame in the RX buffer, we
5199                          * can directly feed the mbuf to the sharks here.
5200                          */
5201                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5202                             struct iwm_rx_packet *, nextoff);
5203                         if (!HAVEROOM(nextoff) ||
5204                             (nextpkt->hdr.code == 0 &&
5205                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5206                              nextpkt->hdr.idx == 0) ||
5207                             (nextpkt->len_n_flags ==
5208                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5209                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5210                                         stolen = FALSE;
5211                                         /* Make sure we abort the loop */
5212                                         nextoff = maxoff;
5213                                 }
5214                                 break;
5215                         }
5216
5217                         /*
5218                          * Use m_copym instead of m_split, because that
5219                          * makes it easier to keep a valid rx buffer in
5220                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5221                          *
5222                          * We need to start m_copym() at offset 0, to get the
5223                          * M_PKTHDR flag preserved.
5224                          */
5225                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5226                         if (m1) {
5227                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5228                                         stolen = TRUE;
5229                                 else
5230                                         m_freem(m1);
5231                         }
5232                         break;
5233                 }
5234
5235                 case IWM_TX_CMD:
5236                         iwm_mvm_rx_tx_cmd(sc, pkt);
5237                         break;
5238
5239                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5240                         struct iwm_missed_beacons_notif *resp;
5241                         int missed;
5242
5243                         /* XXX look at mac_id to determine interface ID */
5244                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5245
5246                         resp = (void *)pkt->data;
5247                         missed = le32toh(resp->consec_missed_beacons);
5248
5249                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5250                             "%s: MISSED_BEACON: mac_id=%d, "
5251                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5252                             "num_rx=%d\n",
5253                             __func__,
5254                             le32toh(resp->mac_id),
5255                             le32toh(resp->consec_missed_beacons_since_last_rx),
5256                             le32toh(resp->consec_missed_beacons),
5257                             le32toh(resp->num_expected_beacons),
5258                             le32toh(resp->num_recvd_beacons));
5259
5260                         /* Be paranoid */
5261                         if (vap == NULL)
5262                                 break;
5263
5264                         /* XXX no net80211 locking? */
5265                         if (vap->iv_state == IEEE80211_S_RUN &&
5266                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5267                                 if (missed > vap->iv_bmissthreshold) {
5268                                         /* XXX bad locking; turn into task */
5269                                         IWM_UNLOCK(sc);
5270                                         ieee80211_beacon_miss(ic);
5271                                         IWM_LOCK(sc);
5272                                 }
5273                         }
5274
5275                         break; }
5276
5277                 case IWM_MFUART_LOAD_NOTIFICATION:
5278                         break;
5279
5280                 case IWM_MVM_ALIVE:
5281                         break;
5282
5283                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5284                         break;
5285
5286                 case IWM_STATISTICS_NOTIFICATION:
5287                         iwm_mvm_handle_rx_statistics(sc, pkt);
5288                         break;
5289
5290                 case IWM_NVM_ACCESS_CMD:
5291                 case IWM_MCC_UPDATE_CMD:
5292                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5293                                 memcpy(sc->sc_cmd_resp,
5294                                     pkt, sizeof(sc->sc_cmd_resp));
5295                         }
5296                         break;
5297
5298                 case IWM_MCC_CHUB_UPDATE_CMD: {
5299                         struct iwm_mcc_chub_notif *notif;
5300                         notif = (void *)pkt->data;
5301
5302                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5303                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5304                         sc->sc_fw_mcc[2] = '\0';
5305                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5306                             "fw source %d sent CC '%s'\n",
5307                             notif->source_id, sc->sc_fw_mcc);
5308                         break;
5309                 }
5310
5311                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5312                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5313                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5314                         struct iwm_dts_measurement_notif_v1 *notif;
5315
5316                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5317                                 device_printf(sc->sc_dev,
5318                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5319                                 break;
5320                         }
5321                         notif = (void *)pkt->data;
5322                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5323                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5324                             notif->temp);
5325                         break;
5326                 }
5327
5328                 case IWM_PHY_CONFIGURATION_CMD:
5329                 case IWM_TX_ANT_CONFIGURATION_CMD:
5330                 case IWM_ADD_STA:
5331                 case IWM_MAC_CONTEXT_CMD:
5332                 case IWM_REPLY_SF_CFG_CMD:
5333                 case IWM_POWER_TABLE_CMD:
5334                 case IWM_LTR_CONFIG:
5335                 case IWM_PHY_CONTEXT_CMD:
5336                 case IWM_BINDING_CONTEXT_CMD:
5337                 case IWM_TIME_EVENT_CMD:
5338                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5339                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5340                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5341                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5342                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5343                 case IWM_REPLY_BEACON_FILTERING_CMD:
5344                 case IWM_MAC_PM_POWER_TABLE:
5345                 case IWM_TIME_QUOTA_CMD:
5346                 case IWM_REMOVE_STA:
5347                 case IWM_TXPATH_FLUSH:
5348                 case IWM_LQ_CMD:
5349                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5350                                  IWM_FW_PAGING_BLOCK_CMD):
5351                 case IWM_BT_CONFIG:
5352                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5353                         cresp = (void *)pkt->data;
5354                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5355                                 memcpy(sc->sc_cmd_resp,
5356                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5357                         }
5358                         break;
5359
5360                 /* ignore */
5361                 case IWM_PHY_DB_CMD:
5362                         break;
5363
5364                 case IWM_INIT_COMPLETE_NOTIF:
5365                         break;
5366
5367                 case IWM_SCAN_OFFLOAD_COMPLETE:
5368                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5369                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5370                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5371                                 ieee80211_runtask(ic, &sc->sc_es_task);
5372                         }
5373                         break;
5374
5375                 case IWM_SCAN_ITERATION_COMPLETE: {
5376                         struct iwm_lmac_scan_complete_notif *notif;
5377                         notif = (void *)pkt->data;
5378                         break;
5379                 }
5380
5381                 case IWM_SCAN_COMPLETE_UMAC:
5382                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5383                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5384                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5385                                 ieee80211_runtask(ic, &sc->sc_es_task);
5386                         }
5387                         break;
5388
5389                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5390                         struct iwm_umac_scan_iter_complete_notif *notif;
5391                         notif = (void *)pkt->data;
5392
5393                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5394                             "complete, status=0x%x, %d channels scanned\n",
5395                             notif->status, notif->scanned_channels);
5396                         break;
5397                 }
5398
5399                 case IWM_REPLY_ERROR: {
5400                         struct iwm_error_resp *resp;
5401                         resp = (void *)pkt->data;
5402
5403                         device_printf(sc->sc_dev,
5404                             "firmware error 0x%x, cmd 0x%x\n",
5405                             le32toh(resp->error_type),
5406                             resp->cmd_id);
5407                         break;
5408                 }
5409
5410                 case IWM_TIME_EVENT_NOTIFICATION:
5411                         iwm_mvm_rx_time_event_notif(sc, pkt);
5412                         break;
5413
5414                 /*
5415                  * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5416                  * messages. Just ignore them for now.
5417                  */
5418                 case IWM_DEBUG_LOG_MSG:
5419                         break;
5420
5421                 case IWM_MCAST_FILTER_CMD:
5422                         break;
5423
5424                 case IWM_SCD_QUEUE_CFG: {
5425                         struct iwm_scd_txq_cfg_rsp *rsp;
5426                         rsp = (void *)pkt->data;
5427
5428                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5429                             "queue cfg token=0x%x sta_id=%d "
5430                             "tid=%d scd_queue=%d\n",
5431                             rsp->token, rsp->sta_id, rsp->tid,
5432                             rsp->scd_queue);
5433                         break;
5434                 }
5435
5436                 default:
5437                         device_printf(sc->sc_dev,
5438                             "frame %d/%d %x UNHANDLED (this should "
5439                             "not happen)\n", qid & ~0x80, idx,
5440                             pkt->len_n_flags);
5441                         break;
5442                 }
5443
5444                 /*
5445                  * Why test bit 0x80?  The Linux driver:
5446                  *
5447                  * There is one exception:  uCode sets bit 15 when it
5448                  * originates the response/notification, i.e. when the
5449                  * response/notification is not a direct response to a
5450                  * command sent by the driver.  For example, uCode issues
5451                  * IWM_REPLY_RX when it sends a received frame to the driver;
5452                  * it is not a direct response to any driver command.
5453                  *
5454                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5455                  * uses a slightly different format for pkt->hdr, and "qid"
5456                  * is actually the upper byte of a two-byte field.
5457                  */
5458                 if (!(qid & (1 << 7)))
5459                         iwm_cmd_done(sc, pkt);
5460
5461                 offset = nextoff;
5462         }
5463         if (stolen)
5464                 m_freem(m);
5465 #undef HAVEROOM
5466 }
5467
5468 /*
5469  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5470  * Basic structure from if_iwn
5471  */
5472 static void
5473 iwm_notif_intr(struct iwm_softc *sc)
5474 {
5475         uint16_t hw;
5476
5477         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5478             BUS_DMASYNC_POSTREAD);
5479
5480         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5481
5482         /*
5483          * Process responses
5484          */
5485         while (sc->rxq.cur != hw) {
5486                 struct iwm_rx_ring *ring = &sc->rxq;
5487                 struct iwm_rx_data *data = &ring->data[ring->cur];
5488
5489                 bus_dmamap_sync(ring->data_dmat, data->map,
5490                     BUS_DMASYNC_POSTREAD);
5491
5492                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5493                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5494                 iwm_handle_rxb(sc, data->m);
5495
5496                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5497         }
5498
5499         /*
5500          * Tell the firmware that it can reuse the ring entries that
5501          * we have just processed.
5502          * Seems like the hardware gets upset unless we align
5503          * the write by 8??
5504          */
5505         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5506         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5507 }
5508
/*
 * Main interrupt handler.  The interrupt cause is read either from the
 * in-memory ICT table (when IWM_FLAG_USE_ICT is set) or directly from
 * the IWM_CSR_INT register, and each cause bit is serviced in turn:
 * firmware SW errors trigger a vap restart, HW errors and rfkill stop
 * the device, FH_TX wakes the firmware-load path, and the RX bits run
 * iwm_notif_intr().  Interrupts are masked on entry and restored via
 * iwm_restore_interrupts() on the way out (except on the error paths
 * that return early).
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): rv is assigned but never read (handler is void);
	 * presumably a leftover from a FILTER-style handler — confirm. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a stray interrupt arriving after iwm_pci_detach()
	 * has cleared sc_mem. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Disable further chip interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* XXX htole32 on a value read from the table; for a 32-bit
		 * swap this is the same operation as le32toh. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Hand the slot back to the hardware. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Unfold the compressed ICT value into CSR_INT bit layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		/* Spurious interrupt; just re-enable and leave. */
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Reset our firmware state tracking. */
		sc->sc_firmware_state = 0;
		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* NOTE(review): returns without re-enabling chip interrupts;
		 * presumably the restart path reinitializes them — confirm. */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the firmware-upload path waiting in iwm load code. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5670
5671 /*
5672  * Autoconf glue-sniffing
5673  */
5674 #define PCI_VENDOR_INTEL                0x8086
5675 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5676 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5677 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5678 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5679 #define PCI_PRODUCT_INTEL_WL_3168       0x24fb
5680 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5681 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5682 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5683 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5684 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5685 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5686 #define PCI_PRODUCT_INTEL_WL_8265       0x24fd
5687
/*
 * Table mapping supported PCI device IDs to their per-chip
 * configuration.  Consulted by iwm_probe() and iwm_dev_check().
 */
static const struct iwm_devices {
	uint16_t		device;	/* PCI device ID */
	const struct iwm_cfg	*cfg;	/* chip-specific configuration */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3168,   &iwm3168_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265,   &iwm8265_cfg },
};
5705
5706 static int
5707 iwm_probe(device_t dev)
5708 {
5709         int i;
5710
5711         for (i = 0; i < nitems(iwm_devices); i++) {
5712                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5713                     pci_get_device(dev) == iwm_devices[i].device) {
5714                         device_set_desc(dev, iwm_devices[i].cfg->name);
5715                         return (BUS_PROBE_DEFAULT);
5716                 }
5717         }
5718
5719         return (ENXIO);
5720 }
5721
5722 static int
5723 iwm_dev_check(device_t dev)
5724 {
5725         struct iwm_softc *sc;
5726         uint16_t devid;
5727         int i;
5728
5729         sc = device_get_softc(dev);
5730
5731         devid = pci_get_device(dev);
5732         for (i = 0; i < NELEM(iwm_devices); i++) {
5733                 if (iwm_devices[i].device == devid) {
5734                         sc->cfg = iwm_devices[i].cfg;
5735                         return (0);
5736                 }
5737         }
5738         device_printf(dev, "unknown adapter type\n");
5739         return ENXIO;
5740 }
5741
5742 /* PCI registers */
5743 #define PCI_CFG_RETRY_TIMEOUT   0x041
5744
5745 static int
5746 iwm_pci_attach(device_t dev)
5747 {
5748         struct iwm_softc *sc;
5749         int count, error, rid;
5750         uint16_t reg;
5751 #if defined(__DragonFly__)
5752         int irq_flags;
5753 #endif
5754
5755         sc = device_get_softc(dev);
5756
5757         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5758          * PCI Tx retries from interfering with C3 CPU state */
5759         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5760
5761         /* Enable bus-mastering and hardware bug workaround. */
5762         pci_enable_busmaster(dev);
5763         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5764         /* if !MSI */
5765         if (reg & PCIM_STATUS_INTxSTATE) {
5766                 reg &= ~PCIM_STATUS_INTxSTATE;
5767         }
5768         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5769
5770         rid = PCIR_BAR(0);
5771         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5772             RF_ACTIVE);
5773         if (sc->sc_mem == NULL) {
5774                 device_printf(sc->sc_dev, "can't map mem space\n");
5775                 return (ENXIO);
5776         }
5777         sc->sc_st = rman_get_bustag(sc->sc_mem);
5778         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5779
5780         /* Install interrupt handler. */
5781         count = 1;
5782         rid = 0;
5783 #if defined(__DragonFly__)
5784         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5785         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5786 #else
5787         if (pci_alloc_msi(dev, &count) == 0)
5788                 rid = 1;
5789         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5790             (rid != 0 ? 0 : RF_SHAREABLE));
5791 #endif
5792         if (sc->sc_irq == NULL) {
5793                 device_printf(dev, "can't map interrupt\n");
5794                         return (ENXIO);
5795         }
5796 #if defined(__DragonFly__)
5797         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5798                                iwm_intr, sc, &sc->sc_ih,
5799                                &wlan_global_serializer);
5800 #else
5801         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5802             NULL, iwm_intr, sc, &sc->sc_ih);
5803 #endif
5804         if (sc->sc_ih == NULL) {
5805                 device_printf(dev, "can't establish interrupt");
5806 #if defined(__DragonFly__)
5807                 pci_release_msi(dev);
5808 #endif
5809                         return (ENXIO);
5810         }
5811         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5812
5813         return (0);
5814 }
5815
/*
 * Release the PCI resources acquired by iwm_pci_attach(): interrupt
 * handler, IRQ resource (and MSI), and the BAR 0 register mapping.
 * Safe to call when attach failed part-way; each step is guarded by a
 * NULL check.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Clear so repeated detach won't double-release. */
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		/* iwm_intr() uses sc_mem == NULL as its detached guard. */
		sc->sc_mem = NULL;
#endif
	}
}
5838
5839
5840
/*
 * Device attach: initialize software state (lock, send queue, callouts,
 * notification-wait, phy db), acquire PCI resources, identify the chip
 * (with extra hardware-revision probing for the 8000 family), allocate
 * firmware/DMA memory and the TX/RX rings, set net80211 capabilities,
 * load the firmware image and schedule iwm_preinit() via a config
 * intrhook.  Any failure jumps to 'fail', where iwm_detach_local()
 * unwinds whatever was set up.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* NOTE(review): -1 presumably means "no synchronous command
	 * response pending" — confirm against the command path. */
	sc->sc_wantresp = -1;

	/* Match device id */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		/* Defer the rest of attach until interrupts work. */
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		sc->sc_preinit_hook.ich_desc = "iwm";
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6061
6062 static int
6063 iwm_is_valid_ether_addr(uint8_t *addr)
6064 {
6065         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6066
6067         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6068                 return (FALSE);
6069
6070         return (TRUE);
6071 }
6072
6073 static int
6074 iwm_wme_update(struct ieee80211com *ic)
6075 {
6076 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6077         struct iwm_softc *sc = ic->ic_softc;
6078         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6079         struct iwm_vap *ivp = IWM_VAP(vap);
6080         struct iwm_node *in;
6081         struct wmeParams tmp[WME_NUM_AC];
6082         int aci, error;
6083
6084         if (vap == NULL)
6085                 return (0);
6086
6087         IEEE80211_LOCK(ic);
6088         for (aci = 0; aci < WME_NUM_AC; aci++)
6089                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6090         IEEE80211_UNLOCK(ic);
6091
6092         IWM_LOCK(sc);
6093         for (aci = 0; aci < WME_NUM_AC; aci++) {
6094                 const struct wmeParams *ac = &tmp[aci];
6095                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6096                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6097                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6098                 ivp->queue_params[aci].edca_txop =
6099                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6100         }
6101         ivp->have_wme = TRUE;
6102         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6103                 in = IWM_NODE(vap->iv_bss);
6104                 if (in->in_assoc) {
6105                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6106                                 device_printf(sc->sc_dev,
6107                                     "%s: failed to update MAC\n", __func__);
6108                         }
6109                 }
6110         }
6111         IWM_UNLOCK(sc);
6112
6113         return (0);
6114 #undef IWM_EXP2
6115 }
6116
/*
 * Deferred second-stage attach, run from the config intrhook installed
 * by iwm_attach(): bring the hardware up, run the init firmware once
 * (after which NVM data such as the MAC address and band capabilities
 * is available), then register with net80211 and install all driver
 * callbacks.  On failure, everything attached so far is torn down via
 * iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware once, then quiesce the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6187
6188 /*
6189  * Attach the interface to 802.11 radiotap.
6190  */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	/* Register our TX and RX radiotap headers with net80211. */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
		IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
		IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}
6206
6207 static struct ieee80211vap *
6208 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6209     enum ieee80211_opmode opmode, int flags,
6210     const uint8_t bssid[IEEE80211_ADDR_LEN],
6211     const uint8_t mac[IEEE80211_ADDR_LEN])
6212 {
6213         struct iwm_vap *ivp;
6214         struct ieee80211vap *vap;
6215
6216         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6217                 return NULL;
6218         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6219         vap = &ivp->iv_vap;
6220         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6221         vap->iv_bmissthreshold = 10;            /* override default */
6222         /* Override with driver methods. */
6223         ivp->iv_newstate = vap->iv_newstate;
6224         vap->iv_newstate = iwm_newstate;
6225
6226         ivp->id = IWM_DEFAULT_MACID;
6227         ivp->color = IWM_DEFAULT_COLOR;
6228
6229         ivp->have_wme = FALSE;
6230         ivp->ps_disabled = FALSE;
6231
6232         ieee80211_ratectl_init(vap);
6233         /* Complete setup. */
6234         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6235             mac);
6236         ic->ic_opmode = opmode;
6237
6238         return vap;
6239 }
6240
6241 static void
6242 iwm_vap_delete(struct ieee80211vap *vap)
6243 {
6244         struct iwm_vap *ivp = IWM_VAP(vap);
6245
6246         ieee80211_ratectl_deinit(vap);
6247         ieee80211_vap_detach(vap);
6248         kfree(ivp, M_80211_VAP);
6249 }
6250
6251 static void
6252 iwm_xmit_queue_drain(struct iwm_softc *sc)
6253 {
6254         struct mbuf *m;
6255         struct ieee80211_node *ni;
6256
6257         while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6258                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6259                 ieee80211_free_node(ni);
6260                 m_freem(m);
6261         }
6262 }
6263
/*
 * net80211 ic_scan_start callback: start a firmware-driven scan, using
 * the UMAC scan API when the firmware advertises it, LMAC otherwise.
 * Runs from the net80211 taskqueue.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		/* Drop the driver lock before calling back into net80211. */
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		/* Scan accepted by firmware; blink the LED while scanning. */
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6291
/*
 * net80211 ic_scan_end callback: stop LED blinking, tell the firmware
 * to stop scanning, and cancel any stale end-of-scan task.  Runs from
 * the net80211 taskqueue.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* If we are associated, restore the steady "on" LED state. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6320
/*
 * net80211 ic_update_mcast callback.  Intentionally empty: the callback
 * must exist, but this driver takes no action on multicast changes.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6325
/*
 * net80211 ic_set_channel callback.  Intentionally empty: channel
 * selection during scans is handled by the firmware scan commands.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6330
/*
 * net80211 ic_scan_curchan callback.  Intentionally empty: per-channel
 * dwell is driven by the firmware, not by net80211.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6335
/*
 * net80211 ic_scan_mindwell callback.  Intentionally empty: the
 * firmware controls scan timing, so there is nothing to do here.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6341
6342 void
6343 iwm_init_task(void *arg1)
6344 {
6345         struct iwm_softc *sc = arg1;
6346
6347         IWM_LOCK(sc);
6348         while (sc->sc_flags & IWM_FLAG_BUSY) {
6349 #if defined(__DragonFly__)
6350                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6351 #else
6352                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6353 #endif
6354 }
6355         sc->sc_flags |= IWM_FLAG_BUSY;
6356         iwm_stop(sc);
6357         if (sc->sc_ic.ic_nrunning > 0)
6358                 iwm_init(sc);
6359         sc->sc_flags &= ~IWM_FLAG_BUSY;
6360         wakeup(&sc->sc_flags);
6361         IWM_UNLOCK(sc);
6362 }
6363
/*
 * Device resume method: restore PCI config state, reinitialize the
 * device, and resume net80211 if iwm_suspend() flagged it (via
 * IWM_FLAG_SCANNING, which the suspend/resume pair repurposes as a
 * "was running" marker).
 */
static int
iwm_resume(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	int do_reinit = 0;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Nothing more to do if attach never completed. */
	if (!sc->sc_attached)
		return 0;

	iwm_init_task(device_get_softc(dev));

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCANNING) {
		sc->sc_flags &= ~IWM_FLAG_SCANNING;
		do_reinit = 1;
	}
	IWM_UNLOCK(sc);

	/* Call into net80211 only after dropping the driver lock. */
	if (do_reinit)
		ieee80211_resume_all(&sc->sc_ic);

	return 0;
}
6393
6394 static int
6395 iwm_suspend(device_t dev)
6396 {
6397         int do_stop = 0;
6398         struct iwm_softc *sc = device_get_softc(dev);
6399
6400         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6401
6402         if (!sc->sc_attached)
6403                 return (0);
6404
6405         ieee80211_suspend_all(&sc->sc_ic);
6406
6407         if (do_stop) {
6408                 IWM_LOCK(sc);
6409                 iwm_stop(sc);
6410                 sc->sc_flags |= IWM_FLAG_SCANNING;
6411                 IWM_UNLOCK(sc);
6412         }
6413
6414         return (0);
6415 }
6416
/*
 * Core teardown shared by device_detach and the attach failure path.
 * do_net80211 selects whether net80211 state is torn down too (the
 * attach failure path passes 0 because ieee80211_ifattach may not
 * have completed).  Teardown order matters: drain pending tasks and
 * stop the device before freeing the rings and DMA memory they use.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Idempotent: a second call (e.g. after a failed attach) is a no-op. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		/* Wait out any queued end-of-scan task before teardown. */
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}
6474
6475 static int
6476 iwm_detach(device_t dev)
6477 {
6478         struct iwm_softc *sc = device_get_softc(dev);
6479
6480         return (iwm_detach_local(sc, 1));
6481 }
6482
/* newbus glue: device methods implemented by this driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus; depends on firmware(9), pci and wlan. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);