if_iwm - Remove more old unused scan API definitions.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
154
155 #include <machine/endian.h>
156
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
159
160 #include <net/bpf.h>
161
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
168
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
173
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
178
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192
/*
 * Channel numbers this driver will expose for 7000-family parts;
 * presumably mirrors the channel list published in the device NVM
 * (TODO confirm against iwm_init_channel_map() usage).
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The table must fit into the fixed-size per-softc channel storage. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
203
/*
 * Channel numbers for 8000-family parts; a superset of the 7000-family
 * list with additional 5 GHz channels (68-96, 169-181).
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The table must fit into the fixed-size 8000-family channel storage. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
214
215 #define IWM_NUM_2GHZ_CHANNELS   14
216 #define IWM_N_HW_ADDR_MASK      0xF
217
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * 'rate' is the bit rate in units of 500 kb/s (so 2 = 1 Mb/s, 11 = 5.5
 * Mb/s, 108 = 54 Mb/s); 'plcp' is the matching PLCP signal value the
 * firmware expects.  CCK entries come first, followed by OFDM entries.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index helpers into iwm_rates[]: indices below IWM_RIDX_OFDM are CCK. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
244
/* One NVM section image read from the device: 'length' bytes at 'data'. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
249
250 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
251 static int      iwm_firmware_store_section(struct iwm_softc *,
252                                            enum iwm_ucode_type,
253                                            const uint8_t *, size_t);
254 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
255 static void     iwm_fw_info_free(struct iwm_fw_info *);
256 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
257 #if !defined(__DragonFly__)
258 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
259 #endif
260 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
261                                      bus_size_t, bus_size_t);
262 static void     iwm_dma_contig_free(struct iwm_dma_info *);
263 static int      iwm_alloc_fwmem(struct iwm_softc *);
264 static int      iwm_alloc_sched(struct iwm_softc *);
265 static int      iwm_alloc_kw(struct iwm_softc *);
266 static int      iwm_alloc_ict(struct iwm_softc *);
267 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
268 static void     iwm_disable_rx_dma(struct iwm_softc *);
269 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
270 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
271 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
272                                   int);
273 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
274 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
275 static void     iwm_enable_interrupts(struct iwm_softc *);
276 static void     iwm_restore_interrupts(struct iwm_softc *);
277 static void     iwm_disable_interrupts(struct iwm_softc *);
278 static void     iwm_ict_reset(struct iwm_softc *);
279 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
280 static void     iwm_stop_device(struct iwm_softc *);
281 static void     iwm_mvm_nic_config(struct iwm_softc *);
282 static int      iwm_nic_rx_init(struct iwm_softc *);
283 static int      iwm_nic_tx_init(struct iwm_softc *);
284 static int      iwm_nic_init(struct iwm_softc *);
285 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
286 static int      iwm_post_alive(struct iwm_softc *);
287 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
288                                    uint16_t, uint8_t *, uint16_t *);
289 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
290                                      uint16_t *, size_t);
291 static uint32_t iwm_eeprom_channel_flags(uint16_t);
292 static void     iwm_add_channel_band(struct iwm_softc *,
293                     struct ieee80211_channel[], int, int *, int, size_t,
294                     const uint8_t[]);
295 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
296                     struct ieee80211_channel[]);
297 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
298                                    const uint16_t *, const uint16_t *,
299                                    const uint16_t *, const uint16_t *,
300                                    const uint16_t *);
301 static void     iwm_set_hw_address_8000(struct iwm_softc *,
302                                         struct iwm_nvm_data *,
303                                         const uint16_t *, const uint16_t *);
304 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
305                             const uint16_t *);
306 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
307 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
308                                   const uint16_t *);
309 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
310                                    const uint16_t *);
311 static void     iwm_set_radio_cfg(const struct iwm_softc *,
312                                   struct iwm_nvm_data *, uint32_t);
313 static int      iwm_parse_nvm_sections(struct iwm_softc *,
314                                        struct iwm_nvm_section *);
315 static int      iwm_nvm_init(struct iwm_softc *);
316 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
317                                        const uint8_t *, uint32_t);
318 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
319                                         const uint8_t *, uint32_t);
320 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
321 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
322                                            struct iwm_fw_sects *, int , int *);
323 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
324 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
325 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
326 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
327 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
328 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
329                                               enum iwm_ucode_type);
330 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
331 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
332 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
333 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
334                                             struct iwm_rx_phy_info *);
335 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
336                                       struct iwm_rx_packet *,
337                                       struct iwm_rx_data *);
338 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
339 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
340                                    struct iwm_rx_data *);
341 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
342                                          struct iwm_rx_packet *,
343                                          struct iwm_node *);
344 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
345                                   struct iwm_rx_data *);
346 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
347 #if 0
348 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
349                                  uint16_t);
350 #endif
351 static const struct iwm_rate *
352         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
353                         struct ieee80211_frame *, struct iwm_tx_cmd *);
354 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
355                        struct ieee80211_node *, int);
356 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
357                              const struct ieee80211_bpf_params *);
358 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
359                                                 struct iwm_mvm_add_sta_cmd_v7 *,
360                                                 int *);
361 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
362                                        int);
363 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
364 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
365 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
366                                            struct iwm_int_sta *,
367                                            const uint8_t *, uint16_t, uint16_t);
368 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
369 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
370 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
371 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
372 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
373 static struct ieee80211_node *
374                 iwm_node_alloc(struct ieee80211vap *,
375                                const uint8_t[IEEE80211_ADDR_LEN]);
376 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
377 static int      iwm_media_change(struct ifnet *);
378 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
379 static void     iwm_endscan_cb(void *, int);
380 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
381                                         struct iwm_sf_cfg_cmd *,
382                                         struct ieee80211_node *);
383 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
384 static int      iwm_send_bt_init_conf(struct iwm_softc *);
385 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
386 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
387 static int      iwm_init_hw(struct iwm_softc *);
388 static void     iwm_init(struct iwm_softc *);
389 static void     iwm_start(struct iwm_softc *);
390 static void     iwm_stop(struct iwm_softc *);
391 static void     iwm_watchdog(void *);
392 static void     iwm_parent(struct ieee80211com *);
393 #ifdef IWM_DEBUG
394 static const char *
395                 iwm_desc_lookup(uint32_t);
396 static void     iwm_nic_error(struct iwm_softc *);
397 static void     iwm_nic_umac_error(struct iwm_softc *);
398 #endif
399 static void     iwm_notif_intr(struct iwm_softc *);
400 static void     iwm_intr(void *);
401 static int      iwm_attach(device_t);
402 static int      iwm_is_valid_ether_addr(uint8_t *);
403 static void     iwm_preinit(void *);
404 static int      iwm_detach_local(struct iwm_softc *sc, int);
405 static void     iwm_init_task(void *);
406 static void     iwm_radiotap_attach(struct iwm_softc *);
407 static struct ieee80211vap *
408                 iwm_vap_create(struct ieee80211com *,
409                                const char [IFNAMSIZ], int,
410                                enum ieee80211_opmode, int,
411                                const uint8_t [IEEE80211_ADDR_LEN],
412                                const uint8_t [IEEE80211_ADDR_LEN]);
413 static void     iwm_vap_delete(struct ieee80211vap *);
414 static void     iwm_scan_start(struct ieee80211com *);
415 static void     iwm_scan_end(struct ieee80211com *);
416 static void     iwm_update_mcast(struct ieee80211com *);
417 static void     iwm_set_channel(struct ieee80211com *);
418 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
419 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
420 static int      iwm_detach(device_t);
421
#if defined(__DragonFly__)
/*
 * hw.iwm.msi.enable: tunable controlling MSI interrupt use on DragonFly
 * (non-zero enables MSI; default on).
 */
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif
428
429 /*
430  * Firmware parser.
431  */
432
433 static int
434 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
435 {
436         const struct iwm_fw_cscheme_list *l = (const void *)data;
437
438         if (dlen < sizeof(*l) ||
439             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
440                 return EINVAL;
441
442         /* we don't actually store anything for now, always use s/w crypto */
443
444         return 0;
445 }
446
/*
 * Record one firmware section of image 'type' in the softc's section
 * table.
 *
 * The first 32 bits of 'data' are the device load offset; the remainder
 * is the section payload, which is referenced in place (not copied), so
 * the underlying firmware image must stay resident.
 *
 * Returns 0 on success, or EINVAL if the ucode type is out of range,
 * the TLV is too short to contain the offset word, or the per-type
 * section table is already full.
 */
static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* Account for the new section in the per-type totals. */
	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
477
/* Scan-channel capability assumed until the firmware TLVs say otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/*
 * Wire format of the IWM_UCODE_TLV_DEF_CALIB payload: pairs a ucode
 * image type (little-endian) with its default calibration control bits.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
484
485 static int
486 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
487 {
488         const struct iwm_tlv_calib_data *def_calib = data;
489         uint32_t ucode_type = le32toh(def_calib->ucode_type);
490
491         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
492                 device_printf(sc->sc_dev,
493                     "Wrong ucode_type %u for default "
494                     "calibration.\n", ucode_type);
495                 return EINVAL;
496         }
497
498         sc->sc_default_calib[ucode_type].flow_trigger =
499             def_calib->calib.flow_trigger;
500         sc->sc_default_calib[ucode_type].event_trigger =
501             def_calib->calib.event_trigger;
502
503         return 0;
504 }
505
/*
 * Drop the reference on the loaded firmware image and clear the parsed
 * section table.  fw_status is deliberately left alone: callers use it
 * to serialize concurrent loads (see the INPROGRESS wait loop in
 * iwm_read_firmware()).
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
514
515 static int
516 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
517 {
518         struct iwm_fw_info *fw = &sc->sc_fw;
519         const struct iwm_tlv_ucode_header *uhdr;
520         struct iwm_ucode_tlv tlv;
521         enum iwm_ucode_tlv_type tlv_type;
522         const struct firmware *fwp;
523         const uint8_t *data;
524         int error = 0;
525         size_t len;
526
527         if (fw->fw_status == IWM_FW_STATUS_DONE &&
528             ucode_type != IWM_UCODE_TYPE_INIT)
529                 return 0;
530
531         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
532 #if defined(__DragonFly__)
533                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
534 #else
535                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
536 #endif
537         }
538         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
539
540         if (fw->fw_fp != NULL)
541                 iwm_fw_info_free(fw);
542
543         /*
544          * Load firmware into driver memory.
545          * fw_fp will be set.
546          */
547         IWM_UNLOCK(sc);
548         fwp = firmware_get(sc->sc_fwname);
549         IWM_LOCK(sc);
550         if (fwp == NULL) {
551                 device_printf(sc->sc_dev,
552                     "could not read firmware %s (error %d)\n",
553                     sc->sc_fwname, error);
554                 goto out;
555         }
556         fw->fw_fp = fwp;
557
558         /* (Re-)Initialize default values. */
559         sc->sc_capaflags = 0;
560         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
561         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
562         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
563
564         /*
565          * Parse firmware contents
566          */
567
568         uhdr = (const void *)fw->fw_fp->data;
569         if (*(const uint32_t *)fw->fw_fp->data != 0
570             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
571                 device_printf(sc->sc_dev, "invalid firmware %s\n",
572                     sc->sc_fwname);
573                 error = EINVAL;
574                 goto out;
575         }
576
577         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
578             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
579             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
580             IWM_UCODE_API(le32toh(uhdr->ver)));
581         data = uhdr->data;
582         len = fw->fw_fp->datasize - sizeof(*uhdr);
583
584         while (len >= sizeof(tlv)) {
585                 size_t tlv_len;
586                 const void *tlv_data;
587
588                 memcpy(&tlv, data, sizeof(tlv));
589                 tlv_len = le32toh(tlv.length);
590                 tlv_type = le32toh(tlv.type);
591
592                 len -= sizeof(tlv);
593                 data += sizeof(tlv);
594                 tlv_data = data;
595
596                 if (len < tlv_len) {
597                         device_printf(sc->sc_dev,
598                             "firmware too short: %zu bytes\n",
599                             len);
600                         error = EINVAL;
601                         goto parse_out;
602                 }
603
604                 switch ((int)tlv_type) {
605                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
606                         if (tlv_len < sizeof(uint32_t)) {
607                                 device_printf(sc->sc_dev,
608                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
609                                     __func__,
610                                     (int) tlv_len);
611                                 error = EINVAL;
612                                 goto parse_out;
613                         }
614                         sc->sc_capa_max_probe_len
615                             = le32toh(*(const uint32_t *)tlv_data);
616                         /* limit it to something sensible */
617                         if (sc->sc_capa_max_probe_len >
618                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
619                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
620                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
621                                     "ridiculous\n", __func__);
622                                 error = EINVAL;
623                                 goto parse_out;
624                         }
625                         break;
626                 case IWM_UCODE_TLV_PAN:
627                         if (tlv_len) {
628                                 device_printf(sc->sc_dev,
629                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
630                                     __func__,
631                                     (int) tlv_len);
632                                 error = EINVAL;
633                                 goto parse_out;
634                         }
635                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
636                         break;
637                 case IWM_UCODE_TLV_FLAGS:
638                         if (tlv_len < sizeof(uint32_t)) {
639                                 device_printf(sc->sc_dev,
640                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
641                                     __func__,
642                                     (int) tlv_len);
643                                 error = EINVAL;
644                                 goto parse_out;
645                         }
646                         /*
647                          * Apparently there can be many flags, but Linux driver
648                          * parses only the first one, and so do we.
649                          *
650                          * XXX: why does this override IWM_UCODE_TLV_PAN?
651                          * Intentional or a bug?  Observations from
652                          * current firmware file:
653                          *  1) TLV_PAN is parsed first
654                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
655                          * ==> this resets TLV_PAN to itself... hnnnk
656                          */
657                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
658                         break;
659                 case IWM_UCODE_TLV_CSCHEME:
660                         if ((error = iwm_store_cscheme(sc,
661                             tlv_data, tlv_len)) != 0) {
662                                 device_printf(sc->sc_dev,
663                                     "%s: iwm_store_cscheme(): returned %d\n",
664                                     __func__,
665                                     error);
666                                 goto parse_out;
667                         }
668                         break;
669                 case IWM_UCODE_TLV_NUM_OF_CPU: {
670                         uint32_t num_cpu;
671                         if (tlv_len != sizeof(uint32_t)) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
674                                     __func__,
675                                     (int) tlv_len);
676                                 error = EINVAL;
677                                 goto parse_out;
678                         }
679                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
680                         if (num_cpu < 1 || num_cpu > 2) {
681                                 device_printf(sc->sc_dev,
682                                     "%s: Driver supports only 1 or 2 CPUs\n",
683                                     __func__);
684                                 error = EINVAL;
685                                 goto parse_out;
686                         }
687                         break;
688                 }
689                 case IWM_UCODE_TLV_SEC_RT:
690                         if ((error = iwm_firmware_store_section(sc,
691                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
692                                 device_printf(sc->sc_dev,
693                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
694                                     __func__,
695                                     error);
696                                 goto parse_out;
697                         }
698                         break;
699                 case IWM_UCODE_TLV_SEC_INIT:
700                         if ((error = iwm_firmware_store_section(sc,
701                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
702                                 device_printf(sc->sc_dev,
703                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
704                                     __func__,
705                                     error);
706                                 goto parse_out;
707                         }
708                         break;
709                 case IWM_UCODE_TLV_SEC_WOWLAN:
710                         if ((error = iwm_firmware_store_section(sc,
711                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
712                                 device_printf(sc->sc_dev,
713                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
714                                     __func__,
715                                     error);
716                                 goto parse_out;
717                         }
718                         break;
719                 case IWM_UCODE_TLV_DEF_CALIB:
720                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
721                                 device_printf(sc->sc_dev,
722                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
723                                     __func__,
724                                     (int) tlv_len,
725                                     (int) sizeof(struct iwm_tlv_calib_data));
726                                 error = EINVAL;
727                                 goto parse_out;
728                         }
729                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
730                                 device_printf(sc->sc_dev,
731                                     "%s: iwm_set_default_calib() failed: %d\n",
732                                     __func__,
733                                     error);
734                                 goto parse_out;
735                         }
736                         break;
737                 case IWM_UCODE_TLV_PHY_SKU:
738                         if (tlv_len != sizeof(uint32_t)) {
739                                 error = EINVAL;
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
742                                     __func__,
743                                     (int) tlv_len);
744                                 goto parse_out;
745                         }
746                         sc->sc_fw_phy_config =
747                             le32toh(*(const uint32_t *)tlv_data);
748                         break;
749
750                 case IWM_UCODE_TLV_API_CHANGES_SET: {
751                         const struct iwm_ucode_api *api;
752                         if (tlv_len != sizeof(*api)) {
753                                 error = EINVAL;
754                                 goto parse_out;
755                         }
756                         api = (const struct iwm_ucode_api *)tlv_data;
757                         /* Flags may exceed 32 bits in future firmware. */
758                         if (le32toh(api->api_index) > 0) {
759                                 device_printf(sc->sc_dev,
760                                     "unsupported API index %d\n",
761                                     le32toh(api->api_index));
762                                 goto parse_out;
763                         }
764                         sc->sc_ucode_api = le32toh(api->api_flags);
765                         break;
766                 }
767
768                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
769                         const struct iwm_ucode_capa *capa;
770                         int idx, i;
771                         if (tlv_len != sizeof(*capa)) {
772                                 error = EINVAL;
773                                 goto parse_out;
774                         }
775                         capa = (const struct iwm_ucode_capa *)tlv_data;
776                         idx = le32toh(capa->api_index);
777                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
778                                 device_printf(sc->sc_dev,
779                                     "unsupported API index %d\n", idx);
780                                 goto parse_out;
781                         }
782                         for (i = 0; i < 32; i++) {
783                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
784                                         continue;
785                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
786                         }
787                         break;
788                 }
789
790                 case 48: /* undocumented TLV */
791                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
792                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
793                         /* ignore, not used by current driver */
794                         break;
795
796                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
797                         if ((error = iwm_firmware_store_section(sc,
798                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
799                             tlv_len)) != 0)
800                                 goto parse_out;
801                         break;
802
803                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
804                         if (tlv_len != sizeof(uint32_t)) {
805                                 error = EINVAL;
806                                 goto parse_out;
807                         }
808                         sc->sc_capa_n_scan_channels =
809                           le32toh(*(const uint32_t *)tlv_data);
810                         break;
811
812                 case IWM_UCODE_TLV_FW_VERSION:
813                         if (tlv_len != sizeof(uint32_t) * 3) {
814                                 error = EINVAL;
815                                 goto parse_out;
816                         }
817                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
818                             "%d.%d.%d",
819                             le32toh(((const uint32_t *)tlv_data)[0]),
820                             le32toh(((const uint32_t *)tlv_data)[1]),
821                             le32toh(((const uint32_t *)tlv_data)[2]));
822                         break;
823
824                 default:
825                         device_printf(sc->sc_dev,
826                             "%s: unknown firmware section %d, abort\n",
827                             __func__, tlv_type);
828                         error = EINVAL;
829                         goto parse_out;
830                 }
831
832                 len -= roundup(tlv_len, 4);
833                 data += roundup(tlv_len, 4);
834         }
835
836         KASSERT(error == 0, ("unhandled error"));
837
838  parse_out:
839         if (error) {
840                 device_printf(sc->sc_dev, "firmware parse error %d, "
841                     "section type %d\n", error, tlv_type);
842         }
843
844         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
845                 device_printf(sc->sc_dev,
846                     "device uses unsupported power ops\n");
847                 error = ENOTSUP;
848         }
849
850  out:
851         if (error) {
852                 fw->fw_status = IWM_FW_STATUS_NONE;
853                 if (fw->fw_fp != NULL)
854                         iwm_fw_info_free(fw);
855         } else
856                 fw->fw_status = IWM_FW_STATUS_DONE;
857         wakeup(&sc->sc_fw);
858
859         return error;
860 }
861
862 /*
863  * DMA resource routines
864  */
865
866 #if !defined(__DragonFly__)
867 static void
868 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
869 {
870         if (error != 0)
871                 return;
872         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
873         *(bus_addr_t *)arg = segs[0].ds_addr;
874 }
875 #endif
876
/*
 * Allocate a physically contiguous, coherent DMA region of 'size' bytes
 * aligned to 'alignment', restricted to 32-bit bus addresses.  Used for
 * descriptor rings, status areas and the firmware staging buffer.  On
 * success dma->{tag,map,vaddr,paddr,size} are valid; on failure the
 * partial state is torn down and an error code is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Clean slate first, so iwm_dma_contig_free() is safe on failure. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	/* DragonFly's one-shot helper allocates tag, map and memory. */
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	/* Create a single-segment tag, then allocate, zero and load. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwm_dma_map_addr() deposits the segment address in dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	/* Push the initial contents out to the device. */
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
931
/*
 * Release a DMA region set up by iwm_dma_contig_alloc().  Tolerates a
 * partially initialized descriptor: each resource is freed only if
 * present, and fields are reset to NULL so repeated calls are harmless.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		/* Complete any in-flight DMA before freeing the memory. */
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
947
/* fwmem is used to load firmware onto the card */
/* Staging buffer of sc_fwdmasegsz bytes for DMA'ing firmware sections. */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
956
/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	/* One struct iwm_agn_scd_bc_tbl per TX queue in sc->txq. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}
965
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB, 4KB-aligned buffer. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
972
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/*
	 * iwm_ict_reset() hands the device the address shifted right by
	 * IWM_ICT_PADDR_SHIFT, hence the matching alignment here.
	 */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
980
/*
 * Allocate everything an RX ring needs: the 256-byte-aligned descriptor
 * array (one 32-bit entry per buffer), the 16-byte-aligned status area,
 * the per-buffer DMA tag, a spare map for iwm_rx_addbuf(), and
 * IWM_RX_RING_COUNT mapped receive buffers.  Any failure unwinds the
 * partially built ring through iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* iwm_rx_addbuf() attaches an mbuf to slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1061
/*
 * Stop the PCIe RX DMA engine.  Requires the NIC lock; if the lock
 * cannot be taken the stop is silently skipped.
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}
1073
1074 static void
1075 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1076 {
1077         /* Reset the ring state */
1078         ring->cur = 0;
1079
1080         /*
1081          * The hw rx ring index in shared memory must also be cleared,
1082          * otherwise the discrepancy can cause reprocessing chaos.
1083          */
1084         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1085 }
1086
1087 static void
1088 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1089 {
1090         int i;
1091
1092         iwm_dma_contig_free(&ring->desc_dma);
1093         iwm_dma_contig_free(&ring->stat_dma);
1094
1095         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1096                 struct iwm_rx_data *data = &ring->data[i];
1097
1098                 if (data->m != NULL) {
1099                         bus_dmamap_sync(ring->data_dmat, data->map,
1100                             BUS_DMASYNC_POSTREAD);
1101                         bus_dmamap_unload(ring->data_dmat, data->map);
1102                         m_freem(data->m);
1103                         data->m = NULL;
1104                 }
1105                 if (data->map != NULL) {
1106                         bus_dmamap_destroy(ring->data_dmat, data->map);
1107                         data->map = NULL;
1108                 }
1109         }
1110         if (ring->spare_map != NULL) {
1111                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1112                 ring->spare_map = NULL;
1113         }
1114         if (ring->data_dmat != NULL) {
1115                 bus_dma_tag_destroy(ring->data_dmat);
1116                 ring->data_dmat = NULL;
1117         }
1118 }
1119
/*
 * Allocate a TX ring: the 256-byte-aligned TFD descriptor array and —
 * for rings up to and including the command queue — the device command
 * area, the per-packet DMA tag and one map per slot.  Any failure
 * unwinds the partially built ring through iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute each slot's bus address of its device command and of
	 * the scratch field inside it, walking the contiguous cmd area.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk must land exactly at the end of the cmd area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1209
1210 static void
1211 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1212 {
1213         int i;
1214
1215         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1216                 struct iwm_tx_data *data = &ring->data[i];
1217
1218                 if (data->m != NULL) {
1219                         bus_dmamap_sync(ring->data_dmat, data->map,
1220                             BUS_DMASYNC_POSTWRITE);
1221                         bus_dmamap_unload(ring->data_dmat, data->map);
1222                         m_freem(data->m);
1223                         data->m = NULL;
1224                 }
1225         }
1226         /* Clear TX descriptors. */
1227         memset(ring->desc, 0, ring->desc_dma.size);
1228         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1229             BUS_DMASYNC_PREWRITE);
1230         sc->qfullmsk &= ~(1 << ring->qid);
1231         ring->queued = 0;
1232         ring->cur = 0;
1233 }
1234
1235 static void
1236 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 {
1238         int i;
1239
1240         iwm_dma_contig_free(&ring->desc_dma);
1241         iwm_dma_contig_free(&ring->cmd_dma);
1242
1243         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1244                 struct iwm_tx_data *data = &ring->data[i];
1245
1246                 if (data->m != NULL) {
1247                         bus_dmamap_sync(ring->data_dmat, data->map,
1248                             BUS_DMASYNC_POSTWRITE);
1249                         bus_dmamap_unload(ring->data_dmat, data->map);
1250                         m_freem(data->m);
1251                         data->m = NULL;
1252                 }
1253                 if (data->map != NULL) {
1254                         bus_dmamap_destroy(ring->data_dmat, data->map);
1255                         data->map = NULL;
1256                 }
1257         }
1258         if (ring->data_dmat != NULL) {
1259                 bus_dma_tag_destroy(ring->data_dmat);
1260                 ring->data_dmat = NULL;
1261         }
1262 }
1263
1264 /*
1265  * High-level hardware frobbing routines
1266  */
1267
/*
 * Unmask the full "init" interrupt set and record the mask so
 * iwm_restore_interrupts() can reinstate it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1274
/* Re-apply the interrupt mask last recorded in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1280
/*
 * Mask all interrupt sources and ACK anything already pending in both
 * the main and the flow-handler status registers, so no stale cause
 * fires when interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1291
/*
 * Reinitialize the interrupt cause table (ICT): zero it, point the
 * device at its (shifted) physical address, switch the driver into ICT
 * interrupt mode, and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1315
1316 /*
1317  * Since this .. hard-resets things, it's time to actually
1318  * mark the first vap (if any) as having no mac context.
1319  * It's annoying, but since the driver is potentially being
1320  * stop/start'ed whilst active (thanks openbsd port!) we
1321  * have to correctly track this.
1322  */
1323 static void
1324 iwm_stop_device(struct iwm_softc *sc)
1325 {
1326         struct ieee80211com *ic = &sc->sc_ic;
1327         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1328         int chnl, qid;
1329         uint32_t mask = 0;
1330
1331         /* tell the device to stop sending interrupts */
1332         iwm_disable_interrupts(sc);
1333
1334         /*
1335          * FreeBSD-local: mark the first vap as not-uploaded,
1336          * so the next transition through auth/assoc
1337          * will correctly populate the MAC context.
1338          */
1339         if (vap) {
1340                 struct iwm_vap *iv = IWM_VAP(vap);
1341                 iv->is_uploaded = 0;
1342         }
1343
1344         /* device going down, Stop using ICT table */
1345         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1346
1347         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1348
1349         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1350
1351         if (iwm_nic_lock(sc)) {
1352                 /* Stop each Tx DMA channel */
1353                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1354                         IWM_WRITE(sc,
1355                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1356                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1357                 }
1358
1359                 /* Wait for DMA channels to be idle */
1360                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1361                     5000)) {
1362                         device_printf(sc->sc_dev,
1363                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1364                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1365                 }
1366                 iwm_nic_unlock(sc);
1367         }
1368         iwm_disable_rx_dma(sc);
1369
1370         /* Stop RX ring. */
1371         iwm_reset_rx_ring(sc, &sc->rxq);
1372
1373         /* Reset all TX rings. */
1374         for (qid = 0; qid < nitems(sc->txq); qid++)
1375                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1376
1377         /*
1378          * Power-down device's busmaster DMA clocks
1379          */
1380         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1381         DELAY(5);
1382
1383         /* Make sure (redundant) we've released our request to stay awake */
1384         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1385             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1386
1387         /* Stop the device, and put it in low power state */
1388         iwm_apm_stop(sc);
1389
1390         /* stop and reset the on-board processor */
1391         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1392         DELAY(1000);
1393
1394         /*
1395          * Upon stop, the APM issues an interrupt if HW RF kill is set.
1396          * This is a bug in certain verions of the hardware.
1397          * Certain devices also keep sending HW RF kill interrupt all
1398          * the time, unless the interrupt is ACKed even if the interrupt
1399          * should be masked. Re-ACK all the interrupts here.
1400          */
1401         iwm_disable_interrupts(sc);
1402
1403         /*
1404          * Even if we stop the HW, we still want the RF kill
1405          * interrupt
1406          */
1407         iwm_enable_rfkill_int(sc);
1408         iwm_check_rfkill(sc);
1409 }
1410
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash (taken from
 * the hardware revision) and the radio type/step/dash (taken from the
 * firmware's PHY-SKU TLV, stored in sc_fw_phy_config), then apply the
 * 7000-family workaround for the early-PCIe-power-off reset hang.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio configuration fields from the PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1452
/*
 * Program the RX ring and status area addresses into the flow handler
 * and enable the RX DMA channel.  Returns EBUSY if the NIC lock cannot
 * be taken, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1512
/*
 * Initialize the TX side: deactivate the TX scheduler, program the
 * "keep warm" page address and each TX ring's descriptor base address,
 * then put the scheduler into auto-active mode.
 *
 * Returns 0 on success, or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	/* Let the scheduler activate/deactivate queues on its own. */
	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1547
/*
 * Top-level NIC bring-up: run the APM init sequence, apply the
 * 7000-family power settings, apply the MVM NIC configuration, then
 * initialize the RX and TX DMA engines.  Finally enable shadow
 * registers for faster CSR access.
 *
 * Returns 0 on success, or the errno from the RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1574
/*
 * Per-access-category TX FIFO assignment, indexed by access category.
 * NOTE(review): the index order here (VO, VI, BE, BK) matches the
 * Linux/iwlwifi AC numbering, not net80211's WME_AC_* order (BE, BK,
 * VI, VO) -- confirm that callers index it accordingly.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1581
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue is configured directly through the scheduler
 * registers (the firmware is not yet able to process commands at that
 * point); all other queues are configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware, which requires dropping
 * the NIC lock around the synchronous command.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or the
 * errno from the firmware command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		/* No aggregation on the command queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Activate the queue, bound to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		/* The sync command below must not be sent with the lock held. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): the reference iwlwifi driver sets the per-queue
	 * enable bit as (1 << qid) in IWM_SCD_EN_CTRL; "| qid" here ORs
	 * in the queue number itself -- confirm against iwlwifi/OpenBSD
	 * before relying on this register's contents.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1656
/*
 * Post-"alive" firmware setup: verify the scheduler SRAM base address
 * reported by the firmware, reset the ICT interrupt table, zero the TX
 * scheduler context in SRAM, program the scheduler DRAM base, enable
 * the command queue and all FH TX DMA channels, and re-enable
 * L1-Active on pre-8000 parts.
 *
 * Returns 0 on success, EBUSY if the NIC could not be (re)locked, or
 * an errno from the SRAM write / queue enable steps.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Sanity check: the alive response and PRPH should agree. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->sched_base, base);
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* iwm_enable_txq() takes and drops the NIC lock itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX FIFOs in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

 out:
	iwm_nic_unlock(sc);
	return error;
}
1723
1724 /*
1725  * NVM read access and content parsing.  We do not support
1726  * external NVM or writing NVM.
1727  * iwlwifi/mvm/nvm.c
1728  */
1729
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
#define IWM_MAX_NVM_SECTION_SIZE	8192

/* op_code values for the IWM_NVM_ACCESS_CMD firmware command */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
#define IWM_READ_NVM_CHUNK_SUCCEED		0
#define IWM_READ_NVM_CHUNK_INVALID_ADDRESS	1
1752
1753 static int
1754 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1755         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1756 {
1757         offset = 0;
1758         struct iwm_nvm_access_cmd nvm_access_cmd = {
1759                 .offset = htole16(offset),
1760                 .length = htole16(length),
1761                 .type = htole16(section),
1762                 .op_code = IWM_NVM_READ_OPCODE,
1763         };
1764         struct iwm_nvm_access_resp *nvm_resp;
1765         struct iwm_rx_packet *pkt;
1766         struct iwm_host_cmd cmd = {
1767                 .id = IWM_NVM_ACCESS_CMD,
1768                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1769                     IWM_CMD_SEND_IN_RFKILL,
1770                 .data = { &nvm_access_cmd, },
1771         };
1772         int ret, offset_read;
1773         size_t bytes_read;
1774         uint8_t *resp_data;
1775
1776         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1777
1778         ret = iwm_send_cmd(sc, &cmd);
1779         if (ret) {
1780                 device_printf(sc->sc_dev,
1781                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1782                 return ret;
1783         }
1784
1785         pkt = cmd.resp_pkt;
1786         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1787                 device_printf(sc->sc_dev,
1788                     "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1789                     pkt->hdr.flags);
1790                 ret = EIO;
1791                 goto exit;
1792         }
1793
1794         /* Extract NVM response */
1795         nvm_resp = (void *)pkt->data;
1796
1797         ret = le16toh(nvm_resp->status);
1798         bytes_read = le16toh(nvm_resp->length);
1799         offset_read = le16toh(nvm_resp->offset);
1800         resp_data = nvm_resp->data;
1801         if (ret) {
1802                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1803                     "NVM access command failed with status %d\n", ret);
1804                 ret = EINVAL;
1805                 goto exit;
1806         }
1807
1808         if (offset_read != offset) {
1809                 device_printf(sc->sc_dev,
1810                     "NVM ACCESS response with invalid offset %d\n",
1811                     offset_read);
1812                 ret = EINVAL;
1813                 goto exit;
1814         }
1815
1816         if (bytes_read > length) {
1817                 device_printf(sc->sc_dev,
1818                     "NVM ACCESS response with too much data "
1819                     "(%d bytes requested, %zd bytes received)\n",
1820                     length, bytes_read);
1821                 ret = EINVAL;
1822                 goto exit;
1823         }
1824
1825         memcpy(data + offset, resp_data, bytes_read);
1826         *len = bytes_read;
1827
1828  exit:
1829         iwm_free_resp(sc, &cmd);
1830         return ret;
1831 }
1832
1833 /*
1834  * Reads an NVM section completely.
1835  * NICs prior to 7000 family don't have a real NVM, but just read
1836  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1837  * by uCode, we need to manually check in this case that we don't
1838  * overflow and try to read more than the EEPROM size.
1839  * For 7000 family NICs, we supply the maximal size we can read, and
1840  * the uCode fills the response with as much data as we can,
1841  * without overflowing, so no check is needed.
1842  */
1843 static int
1844 iwm_nvm_read_section(struct iwm_softc *sc,
1845         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1846 {
1847         uint16_t chunklen, seglen;
1848         int error = 0;
1849
1850         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1851             "reading NVM section %d\n", section);
1852
1853         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1854         *len = 0;
1855
1856         /* Read NVM chunks until exhausted (reading less than requested) */
1857         while (seglen == chunklen && *len < max_len) {
1858                 error = iwm_nvm_read_chunk(sc,
1859                     section, *len, chunklen, data, &seglen);
1860                 if (error) {
1861                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1862                             "Cannot read from NVM section "
1863                             "%d at offset %d\n", section, *len);
1864                         return error;
1865                 }
1866                 *len += seglen;
1867         }
1868
1869         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1870             "NVM section %d read completed (%d bytes, error=%d)\n",
1871             section, *len, error);
1872         return error;
1873 }
1874
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* Start of the channel-flag table, relative to the SW section. */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1892
/* NVM offsets (in words) for the 8000 device family. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	/* Offset of the MAC address within the MAC_OVERRIDE section. */
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1918
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
/* Field extractors for the pre-8000 radio config word. */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Field extractors for the wider 8000-family radio config word. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback regulatory TX power limit (dBm) when the NVM has none. */
#define DEFAULT_MAX_TX_POWER 16
1943
1944 /**
1945  * enum iwm_nvm_channel_flags - channel flags in NVM
1946  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1947  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1948  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1949  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1950  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1951  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1952  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1953  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1954  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1955  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1956  */
1957 enum iwm_nvm_channel_flags {
1958         IWM_NVM_CHANNEL_VALID = (1 << 0),
1959         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1960         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1961         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1962         IWM_NVM_CHANNEL_DFS = (1 << 7),
1963         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1964         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1965         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1966         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1967 };
1968
1969 /*
1970  * Translate EEPROM flags to net80211.
1971  */
1972 static uint32_t
1973 iwm_eeprom_channel_flags(uint16_t ch_flags)
1974 {
1975         uint32_t nflags;
1976
1977         nflags = 0;
1978         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1979                 nflags |= IEEE80211_CHAN_PASSIVE;
1980         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1981                 nflags |= IEEE80211_CHAN_NOADHOC;
1982         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1983                 nflags |= IEEE80211_CHAN_DFS;
1984                 /* Just in case. */
1985                 nflags |= IEEE80211_CHAN_NOADHOC;
1986         }
1987
1988         return (nflags);
1989 }
1990
1991 static void
1992 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1993     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1994     const uint8_t bands[])
1995 {
1996         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1997         uint32_t nflags;
1998         uint16_t ch_flags;
1999         uint8_t ieee;
2000         int error;
2001
2002         for (; ch_idx < ch_num; ch_idx++) {
2003                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2004                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2005                         ieee = iwm_nvm_channels[ch_idx];
2006                 else
2007                         ieee = iwm_nvm_channels_8000[ch_idx];
2008
2009                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2010                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2011                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2012                             ieee, ch_flags,
2013                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2014                             "5.2" : "2.4");
2015                         continue;
2016                 }
2017
2018                 nflags = iwm_eeprom_channel_flags(ch_flags);
2019                 error = ieee80211_add_channel(chans, maxchans, nchans,
2020                     ieee, 0, 0, nflags, bands);
2021                 if (error != 0)
2022                         break;
2023
2024                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2025                     "Ch. %d Flags %x [%sGHz] - Added\n",
2026                     ieee, ch_flags,
2027                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2028                     "5.2" : "2.4");
2029         }
2030 }
2031
2032 static void
2033 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2034     struct ieee80211_channel chans[])
2035 {
2036         struct iwm_softc *sc = ic->ic_softc;
2037         struct iwm_nvm_data *data = &sc->sc_nvm;
2038         uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2039         size_t ch_num;
2040
2041         memset(bands, 0, sizeof(bands));
2042         /* 1-13: 11b/g channels. */
2043         setbit(bands, IEEE80211_MODE_11B);
2044         setbit(bands, IEEE80211_MODE_11G);
2045         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2046             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2047
2048         /* 14: 11b channel only. */
2049         clrbit(bands, IEEE80211_MODE_11G);
2050         iwm_add_channel_band(sc, chans, maxchans, nchans,
2051             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2052
2053         if (data->sku_cap_band_52GHz_enable) {
2054                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2055                         ch_num = nitems(iwm_nvm_channels);
2056                 else
2057                         ch_num = nitems(iwm_nvm_channels_8000);
2058                 memset(bands, 0, sizeof(bands));
2059                 setbit(bands, IEEE80211_MODE_11A);
2060                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2061                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2062         }
2063 }
2064
/*
 * Determine the MAC address on family-8000 devices.  Prefer the
 * MAC_OVERRIDE NVM section; if it is absent, holds the reserved
 * address, or is otherwise invalid, fall back to the WFMP MAC-address
 * PRPH registers.  If neither source is available, zero the address.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Sentinel address meaning "no override present". */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The registers hold the address bytes in reversed order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2123
2124 static int
2125 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2126             const uint16_t *phy_sku)
2127 {
2128         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2129                 return le16_to_cpup(nvm_sw + IWM_SKU);
2130
2131         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2132 }
2133
2134 static int
2135 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2136 {
2137         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2138                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2139         else
2140                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2141                                                 IWM_NVM_VERSION_8000));
2142 }
2143
2144 static int
2145 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2146                   const uint16_t *phy_sku)
2147 {
2148         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2149                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2150
2151         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2152 }
2153
2154 static int
2155 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2156 {
2157         int n_hw_addr;
2158
2159         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2160                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2161
2162         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2163
2164         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2165 }
2166
2167 static void
2168 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2169                   uint32_t radio_cfg)
2170 {
2171         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2172                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2173                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2174                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2175                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2176                 return;
2177         }
2178
2179         /* set the radio configuration for family 8000 */
2180         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2181         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2182         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2183         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2184         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2185         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2186 }
2187
/*
 * Parse the raw NVM section contents into sc->sc_nvm: NVM version,
 * radio configuration, SKU capabilities, number of reserved MAC
 * addresses, the MAC address itself and the channel-flag table.
 * Section pointers that a given family does not use may be NULL.
 *
 * Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint32_t sku, radio_cfg;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is forced off here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
	}

	/* Channel flags live in different sections per family. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2238
/*
 * Validate that the NVM sections mandatory for this device family were
 * actually read, pick the right HW section, and hand everything to
 * iwm_parse_nvm_data().
 *
 * Returns ENOENT if a required section is missing, otherwise the
 * result of iwm_parse_nvm_data().  Panics on an unknown device family.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return ENOENT;
		}

		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	/* Optional sections may be NULL; the parser tolerates that. */
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2295
2296 static int
2297 iwm_nvm_init(struct iwm_softc *sc)
2298 {
2299         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2300         int i, section, error;
2301         uint16_t len;
2302         uint8_t *buf;
2303         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2304
2305         memset(nvm_sections, 0 , sizeof(nvm_sections));
2306
2307         buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT);
2308         if (buf == NULL)
2309                 return ENOMEM;
2310
2311         for (i = 0; i < nitems(nvm_to_read); i++) {
2312                 section = nvm_to_read[i];
2313                 KKASSERT(section <= nitems(nvm_sections));
2314
2315                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2316                 if (error) {
2317                         error = 0;
2318                         continue;
2319                 }
2320                 nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT);
2321                 if (nvm_sections[section].data == NULL) {
2322                         error = ENOMEM;
2323                         break;
2324                 }
2325                 memcpy(nvm_sections[section].data, buf, len);
2326                 nvm_sections[section].length = len;
2327         }
2328         kfree(buf, M_DEVBUF);
2329         if (error == 0)
2330                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2331
2332         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2333                 if (nvm_sections[i].data != NULL)
2334                         kfree(nvm_sections[i].data, M_DEVBUF);
2335         }
2336
2337         return error;
2338 }
2339
2340 /*
2341  * Firmware loading gunk.  This is kind of a weird hybrid between the
2342  * iwn driver and the Linux iwlwifi driver.
2343  */
2344
2345 static int
2346 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2347         const uint8_t *section, uint32_t byte_cnt)
2348 {
2349         int error = EINVAL;
2350         uint32_t chunk_sz, offset;
2351
2352         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2353
2354         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2355                 uint32_t addr, len;
2356                 const uint8_t *data;
2357
2358                 addr = dst_addr + offset;
2359                 len = MIN(chunk_sz, byte_cnt - offset);
2360                 data = section + offset;
2361
2362                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2363                 if (error)
2364                         break;
2365         }
2366
2367         return error;
2368 }
2369
/*
 * Copy one firmware chunk into the pre-allocated DMA bounce buffer and
 * program the FH service DMA channel to move it to device SRAM at
 * dst_addr, then sleep (up to 1s per wakeup) until the interrupt
 * handler sets sc_fw_chunk_done.
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be taken, or
 * the sleep error.  The register write sequence below is order
 * sensitive; do not reorder.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Destinations in the extended SRAM window need a PRPH switch. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	/* Armed before starting DMA; set by the rx handler on completion. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel, point it at the chunk, then re-enable it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	/* Undo the extended address-space switch, if it was engaged. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		iwm_nic_unlock(sc);
	}

	return error;
}
2439
/*
 * Load the firmware sections belonging to one CPU (1 or 2) of an
 * 8000-series device, notifying the ucode after each section lands.
 *
 * *first_ucode_section carries the scan position between the CPU1 and
 * CPU2 invocations: it is reset for CPU1 and resumes just past the
 * separator section for CPU2.  Returns 0, or the section-load error.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, error = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		/* CPU1 status is reported in the low 16 bits of LOAD_STATUS. */
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2 status goes in the high 16 bits; skip the separator. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
		    i, offset, dlen, cpu);

		/* Sections larger than the DMA bounce buffer cannot load. */
		if (dlen > sc->sc_fwdmasegsz) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "chunk %d too large (%d bytes)\n", i, dlen);
			error = EFBIG;
		} else {
			error = iwm_firmware_load_sect(sc, offset, data, dlen);
		}
		if (error) {
			device_printf(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, error);
			return error;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* One status bit per section, accumulated. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's half of LOAD_STATUS as fully complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2520
2521 int
2522 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2523 {
2524         struct iwm_fw_sects *fws;
2525         int error = 0;
2526         int first_ucode_section;
2527
2528         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2529             ucode_type);
2530
2531         fws = &sc->sc_fw.fw_sects[ucode_type];
2532
2533         /* configure the ucode to be ready to get the secured image */
2534         /* release CPU reset */
2535         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2536
2537         /* load to FW the binary Secured sections of CPU1 */
2538         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2539         if (error)
2540                 return error;
2541
2542         /* load to FW the binary sections of CPU2 */
2543         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2544 }
2545
2546 static int
2547 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2548 {
2549         struct iwm_fw_sects *fws;
2550         int error, i;
2551         const void *data;
2552         uint32_t dlen;
2553         uint32_t offset;
2554
2555         sc->sc_uc.uc_intr = 0;
2556
2557         fws = &sc->sc_fw.fw_sects[ucode_type];
2558         for (i = 0; i < fws->fw_count; i++) {
2559                 data = fws->fw_sect[i].fws_data;
2560                 dlen = fws->fw_sect[i].fws_len;
2561                 offset = fws->fw_sect[i].fws_devoff;
2562                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2563                     "LOAD FIRMWARE type %d offset %u len %d\n",
2564                     ucode_type, offset, dlen);
2565                 if (dlen > sc->sc_fwdmasegsz) {
2566                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2567                             "chunk %d too large (%d bytes)\n", i, dlen);
2568                         error = EFBIG;
2569                 } else {
2570                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2571                 }
2572                 if (error) {
2573                         device_printf(sc->sc_dev,
2574                             "could not load firmware chunk %u of %u "
2575                             "(error=%d)\n", i, fws->fw_count, error);
2576                         return error;
2577                 }
2578         }
2579
2580         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2581
2582         return 0;
2583 }
2584
2585 static int
2586 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2587 {
2588         int error, w;
2589
2590         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2591                 error = iwm_load_firmware_8000(sc, ucode_type);
2592         else
2593                 error = iwm_load_firmware_7000(sc, ucode_type);
2594         if (error)
2595                 return error;
2596
2597         /* wait for the firmware to load */
2598         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2599 #if defined(__DragonFly__)
2600                 error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2601 #else
2602                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2603 #endif
2604         }
2605         if (error || !sc->sc_uc.uc_ok) {
2606                 device_printf(sc->sc_dev, "could not load firmware\n");
2607                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2608                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2609                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2610                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2611                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2612                 }
2613         }
2614
2615         /*
2616          * Give the firmware some time to initialize.
2617          * Accessing it too early causes errors.
2618          */
2619         lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2620
2621         return error;
2622 }
2623
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts, and finally load the requested ucode image onto the
 * hardware.  Returns 0 on success or an errno from init/load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack any stale interrupts before touching the device. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2653
2654 static int
2655 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2656 {
2657         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2658                 .valid = htole32(valid_tx_ant),
2659         };
2660
2661         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2662             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2663 }
2664
2665 static int
2666 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2667 {
2668         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2669         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2670
2671         /* Set parameters */
2672         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2673         phy_cfg_cmd.calib_control.event_trigger =
2674             sc->sc_default_calib[ucode_type].event_trigger;
2675         phy_cfg_cmd.calib_control.flow_trigger =
2676             sc->sc_default_calib[ucode_type].flow_trigger;
2677
2678         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2679             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2680         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2681             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2682 }
2683
2684 static int
2685 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2686         enum iwm_ucode_type ucode_type)
2687 {
2688         enum iwm_ucode_type old_type = sc->sc_uc_current;
2689         int error;
2690
2691         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2692                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2693                         error);
2694                 return error;
2695         }
2696
2697         sc->sc_uc_current = ucode_type;
2698         error = iwm_start_fw(sc, ucode_type);
2699         if (error) {
2700                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2701                 sc->sc_uc_current = old_type;
2702                 return error;
2703         }
2704
2705         error = iwm_post_alive(sc);
2706         if (error) {
2707                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2708         }
2709         return error;
2710 }
2711
2712 /*
2713  * mvm misc bits
2714  */
2715
/*
 * Run the INIT ucode image.  With justnvm set, only load the firmware
 * far enough to read the NVM (and the MAC address) and return.
 * Otherwise also configure BT coex, the smart FIFO, TX antennas, and
 * trigger the internal calibrations, then wait (2s per wakeup) for
 * the init-complete notification.  Returns 0 or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Armed before load; set by the rx path on IWM_INIT_COMPLETE. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		/* NVM only: grab the MAC address and stop here. */
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		return 0;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", error);
		return error;
	}

	/* Init Smart FIFO. */
	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (error != 0)
		return error;

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
				 0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
				 0, "iwminit", 2*hz);
#endif
		if (error) {
			device_printf(sc->sc_dev, "init complete failed: %d\n",
				sc->sc_init_complete);
			break;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
	    sc->sc_init_complete ? "" : "not ");

	return error;
}
2798
2799 /*
2800  * receive side
2801  */
2802
2803 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a jumbo mbuf, DMA-map it via the ring's spare map, and
 * install it at slot idx of the RX ring, updating the hardware RX
 * descriptor.  The spare map and the slot's map are swapped so there
 * is always a pre-created map available for the next replenish.
 *
 * Returns 0 on success, ENOBUFS if no mbuf was available, or the
 * DMA-load error.  'size' is currently unused (IWM_RBUF_SIZE is
 * hard-coded for the jumbo cluster).
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Drop the old mapping only after the new mbuf mapped cleanly. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware requires 256-byte alignment; descriptor holds addr>>8. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
2854
2855 #define IWM_RSSI_OFFSET 50
2856 static int
2857 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2858 {
2859         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2860         uint32_t agc_a, agc_b;
2861         uint32_t val;
2862
2863         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2864         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2865         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2866
2867         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2868         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2869         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2870
2871         /*
2872          * dBm = rssi dB - agc dB - constant.
2873          * Higher AGC (higher radio gain) means lower signal.
2874          */
2875         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2876         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2877         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2878
2879         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2880             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2881             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2882
2883         return max_rssi_dbm;
2884 }
2885
2886 /*
2887  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2888  * values are reported by the fw as positive values - need to negate
2889  * to obtain their dBM.  Account for missing antennas by replacing 0
2890  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2891  */
2892 static int
2893 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2894 {
2895         int energy_a, energy_b, energy_c, max_energy;
2896         uint32_t val;
2897
2898         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2899         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2900             IWM_RX_INFO_ENERGY_ANT_A_POS;
2901         energy_a = energy_a ? -energy_a : -256;
2902         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2903             IWM_RX_INFO_ENERGY_ANT_B_POS;
2904         energy_b = energy_b ? -energy_b : -256;
2905         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2906             IWM_RX_INFO_ENERGY_ANT_C_POS;
2907         energy_c = energy_c ? -energy_c : -256;
2908         max_energy = MAX(energy_a, energy_b);
2909         max_energy = MAX(max_energy, energy_c);
2910
2911         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2912             "energy In A %d B %d C %d , and max %d\n",
2913             energy_a, energy_b, energy_c, max_energy);
2914
2915         return max_energy;
2916 }
2917
2918 static void
2919 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2920         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2921 {
2922         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2923
2924         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2925         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2926
2927         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2928 }
2929
2930 /*
2931  * Retrieve the average noise (in dBm) among receivers.
2932  */
2933 static int
2934 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2935 {
2936         int i, total, nbant, noise;
2937
2938         total = nbant = noise = 0;
2939         for (i = 0; i < 3; i++) {
2940                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2941                 if (noise) {
2942                         total += noise;
2943                         nbant++;
2944                 }
2945         }
2946
2947         /* There should be at least one antenna but check anyway. */
2948         return (nbant == 0) ? -127 : (total / nbant) - 107;
2949 }
2950
2951 /*
2952  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2953  *
2954  * Handles the actual data of the Rx packet from the fw
2955  */
2956 static void
2957 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2958         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2959 {
2960         struct ieee80211com *ic = &sc->sc_ic;
2961         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2962         struct ieee80211_frame *wh;
2963         struct ieee80211_node *ni;
2964         struct ieee80211_rx_stats rxs;
2965         struct mbuf *m;
2966         struct iwm_rx_phy_info *phy_info;
2967         struct iwm_rx_mpdu_res_start *rx_res;
2968         uint32_t len;
2969         uint32_t rx_pkt_status;
2970         int rssi;
2971
2972         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2973
2974         phy_info = &sc->sc_last_phy_info;
2975         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2976         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2977         len = le16toh(rx_res->byte_count);
2978         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2979
2980         m = data->m;
2981         m->m_data = pkt->data + sizeof(*rx_res);
2982         m->m_pkthdr.len = m->m_len = len;
2983
2984         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2985                 device_printf(sc->sc_dev,
2986                     "dsp size out of range [0,20]: %d\n",
2987                     phy_info->cfg_phy_cnt);
2988                 return;
2989         }
2990
2991         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2992             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2993                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2994                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2995                 return; /* drop */
2996         }
2997
2998         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2999                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3000         } else {
3001                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3002         }
3003         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
3004         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
3005
3006         /* replenish ring for the buffer we're going to feed to the sharks */
3007         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3008                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3009                     __func__);
3010                 return;
3011         }
3012
3013         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3014
3015         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3016             "%s: phy_info: channel=%d, flags=0x%08x\n",
3017             __func__,
3018             le16toh(phy_info->channel),
3019             le16toh(phy_info->phy_flags));
3020
3021         /*
3022          * Populate an RX state struct with the provided information.
3023          */
3024         bzero(&rxs, sizeof(rxs));
3025         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3026         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3027         rxs.c_ieee = le16toh(phy_info->channel);
3028         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3029                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3030         } else {
3031                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3032         }
3033         rxs.rssi = rssi - sc->sc_noise;
3034         rxs.nf = sc->sc_noise;
3035
3036         if (ieee80211_radiotap_active_vap(vap)) {
3037                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3038
3039                 tap->wr_flags = 0;
3040                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3041                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3042                 tap->wr_chan_freq = htole16(rxs.c_freq);
3043                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3044                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3045                 tap->wr_dbm_antsignal = (int8_t)rssi;
3046                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3047                 tap->wr_tsft = phy_info->system_timestamp;
3048                 switch (phy_info->rate) {
3049                 /* CCK rates. */
3050                 case  10: tap->wr_rate =   2; break;
3051                 case  20: tap->wr_rate =   4; break;
3052                 case  55: tap->wr_rate =  11; break;
3053                 case 110: tap->wr_rate =  22; break;
3054                 /* OFDM rates. */
3055                 case 0xd: tap->wr_rate =  12; break;
3056                 case 0xf: tap->wr_rate =  18; break;
3057                 case 0x5: tap->wr_rate =  24; break;
3058                 case 0x7: tap->wr_rate =  36; break;
3059                 case 0x9: tap->wr_rate =  48; break;
3060                 case 0xb: tap->wr_rate =  72; break;
3061                 case 0x1: tap->wr_rate =  96; break;
3062                 case 0x3: tap->wr_rate = 108; break;
3063                 /* Unknown rate: should not happen. */
3064                 default:  tap->wr_rate =   0;
3065                 }
3066         }
3067
3068         IWM_UNLOCK(sc);
3069         if (ni != NULL) {
3070                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3071                 ieee80211_input_mimo(ni, m, &rxs);
3072                 ieee80211_free_node(ni);
3073         } else {
3074                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3075                 ieee80211_input_mimo_all(ic, m, &rxs);
3076         }
3077         IWM_LOCK(sc);
3078 }
3079
3080 static int
3081 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3082         struct iwm_node *in)
3083 {
3084         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3085         struct ieee80211_node *ni = &in->in_ni;
3086         struct ieee80211vap *vap = ni->ni_vap;
3087         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3088         int failack = tx_resp->failure_frame;
3089
3090         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3091
3092         /* Update rate control statistics. */
3093         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3094             __func__,
3095             (int) le16toh(tx_resp->status.status),
3096             (int) le16toh(tx_resp->status.sequence),
3097             tx_resp->frame_count,
3098             tx_resp->bt_kill_count,
3099             tx_resp->failure_rts,
3100             tx_resp->failure_frame,
3101             le32toh(tx_resp->initial_rate),
3102             (int) le16toh(tx_resp->wireless_media_time));
3103
3104         if (status != IWM_TX_STATUS_SUCCESS &&
3105             status != IWM_TX_STATUS_DIRECT_DONE) {
3106                 ieee80211_ratectl_tx_complete(vap, ni,
3107                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3108                 return (1);
3109         } else {
3110                 ieee80211_ratectl_tx_complete(vap, ni,
3111                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3112                 return (0);
3113         }
3114 }
3115
/*
 * Handle a TX command completion notification from the firmware:
 * finalize rate control accounting, unmap and free the transmitted
 * mbuf, release the ring slot and restart transmission if the queue
 * drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	/* The slot must still be in-flight and fully populated. */
	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the TX watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Mark the ring slot free before handing the mbuf back. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start(sc);
		}
	}
}
3163
3164 /*
3165  * transmit side
3166  */
3167
3168 /*
3169  * Process a "command done" firmware notification.  This is where we wakeup
3170  * processes waiting for a synchronous command completion.
3171  * from if_iwn
3172  */
3173 static void
3174 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3175 {
3176         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3177         struct iwm_tx_data *data;
3178
3179         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3180                 return; /* Not a command ack. */
3181         }
3182
3183         data = &ring->data[pkt->hdr.idx];
3184
3185         /* If the command was mapped in an mbuf, free it. */
3186         if (data->m != NULL) {
3187                 bus_dmamap_sync(ring->data_dmat, data->map,
3188                     BUS_DMASYNC_POSTWRITE);
3189                 bus_dmamap_unload(ring->data_dmat, data->map);
3190                 m_freem(data->m);
3191                 data->m = NULL;
3192         }
3193         wakeup(&ring->desc[pkt->hdr.idx]);
3194 }
3195
#if 0
/*
 * necessary only for block ack mode
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Byte-count entry packs station id (high 4 bits) and length. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* NOTE(review): presumably mirrors the entry into the duplicate
	 * region of the byte-count table for wrap-around — confirm against
	 * the iwlwifi scheduler documentation. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3228
3229 /*
3230  * Take an 802.11 (non-n) rate, find the relevant rate
3231  * table entry.  return the index into in_ridx[].
3232  *
3233  * The caller then uses that index back into in_ridx
3234  * to figure out the rate index programmed /into/
3235  * the firmware for this given node.
3236  */
3237 static int
3238 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3239     uint8_t rate)
3240 {
3241         int i;
3242         uint8_t r;
3243
3244         for (i = 0; i < nitems(in->in_ridx); i++) {
3245                 r = iwm_rates[in->in_ridx[i]].rate;
3246                 if (rate == r)
3247                         return (i);
3248         }
3249         /* XXX Return the first */
3250         /* XXX TODO: have it return the /lowest/ */
3251         return (0);
3252 }
3253
3254 /*
3255  * Fill in the rate related information for a transmit command.
3256  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	/* Fixed retry limits; rate control only chooses the initial rate. */
	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/*
		 * For non-data, use the lowest supported rate for the given
		 * operational mode.
		 *
		 * Note: there may not be any rate control information available.
		 * This driver currently assumes if we're transmitting data
		 * frames, use the rate control table.  Grr.
		 *
		 * XXX TODO: use the configured rate for the traffic type!
		 * XXX TODO: this should be per-vap, not curmode; as we later
		 * on we'll want to handle off-channel stuff (eg TDLS).
		 */
		if (ic->ic_curmode == IEEE80211_MODE_11A) {
			/*
			 * XXX this assumes the mode is either 11a or not 11a;
			 * definitely won't work for 11n.
			 */
			ridx = IWM_RIDX_OFDM;
		} else {
			ridx = IWM_RIDX_CCK;
		}
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	/* Program the firmware rate: PLCP value plus antenna/CCK flags. */
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3327
/* Size of the first TX buffer: the leading bytes of the TX command. */
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on TX ring 'ac': build the firmware
 * TX command (rate, flags, station, power-management timeout), encrypt
 * if required, DMA-map the payload and fill the TFD descriptor, then
 * advance the ring write pointer.
 *
 * Consumes (or frees) 'm' in all cases.  Returns 0 on success or an
 * errno on failure.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and program rate_n_flags / retry limits. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Ask for RTS/CTS protection on long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Choose a power-management frame timeout per frame subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	/* DragonFly's loader defragments the chain itself if needed. */
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB 0/1 cover the command header + TX command + 802.11 header. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3560
/*
 * net80211 raw-xmit entry point.  Note that the bpf params are
 * currently ignored: both branches below call iwm_tx() identically
 * (hence the XXX), so caller-supplied rate/power overrides have no
 * effect yet.
 */
static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	/* Refuse and free the frame if the hardware is not up. */
	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/* XXX fix this */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	/* Arm the TX watchdog; cleared again on TX completion. */
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}
3591
3592 /*
3593  * mvm/tx.c
3594  */
3595
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
/*
 * Flush the TFD queues in 'tfd_msk'.  'sync' selects whether the
 * command is sent synchronously or asynchronously.  Returns 0 on
 * success or an errno from the command submission.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3623
3624 static int
3625 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3626         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3627 {
3628         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3629             cmd, status);
3630 }
3631
3632 /* send station add/update command to firmware */
3633 static int
3634 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3635 {
3636         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3637         int ret;
3638         uint32_t status;
3639
3640         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3641
3642         add_sta_cmd.sta_id = IWM_STATION_ID;
3643         add_sta_cmd.mac_id_n_color
3644             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3645                 IWM_DEFAULT_COLOR));
3646         if (!update) {
3647                 int ac;
3648                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3649                         add_sta_cmd.tfd_queue_msk |=
3650                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3651                 }
3652                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3653         }
3654         add_sta_cmd.add_modify = update ? 1 : 0;
3655         add_sta_cmd.station_flags_msk
3656             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3657         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3658         if (update)
3659                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3660
3661         status = IWM_ADD_STA_SUCCESS;
3662         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3663         if (ret)
3664                 return ret;
3665
3666         switch (status) {
3667         case IWM_ADD_STA_SUCCESS:
3668                 break;
3669         default:
3670                 ret = EIO;
3671                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3672                 break;
3673         }
3674
3675         return ret;
3676 }
3677
/* Add the BSS station to the firmware (first-time add). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3683
/* Update an already-added BSS station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3689
3690 static int
3691 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3692         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3693 {
3694         struct iwm_mvm_add_sta_cmd_v7 cmd;
3695         int ret;
3696         uint32_t status;
3697
3698         memset(&cmd, 0, sizeof(cmd));
3699         cmd.sta_id = sta->sta_id;
3700         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3701
3702         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3703         cmd.tid_disable_tx = htole16(0xffff);
3704
3705         if (addr)
3706                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3707
3708         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3709         if (ret)
3710                 return ret;
3711
3712         switch (status) {
3713         case IWM_ADD_STA_SUCCESS:
3714                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3715                     "%s: Internal station added.\n", __func__);
3716                 return 0;
3717         default:
3718                 device_printf(sc->sc_dev,
3719                     "%s: Add internal station failed, status=0x%x\n",
3720                     __func__, status);
3721                 ret = EIO;
3722                 break;
3723         }
3724         return ret;
3725 }
3726
3727 static int
3728 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3729 {
3730         int ret;
3731
3732         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3733         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3734
3735         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3736         if (ret)
3737                 return ret;
3738
3739         ret = iwm_mvm_add_int_sta_common(sc,
3740             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3741
3742         if (ret)
3743                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3744         return ret;
3745 }
3746
3747 static int
3748 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3749 {
3750         struct iwm_time_quota_cmd cmd;
3751         int i, idx, ret, num_active_macs, quota, quota_rem;
3752         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3753         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3754         uint16_t id;
3755
3756         memset(&cmd, 0, sizeof(cmd));
3757
3758         /* currently, PHY ID == binding ID */
3759         if (in) {
3760                 id = in->in_phyctxt->id;
3761                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3762                 colors[id] = in->in_phyctxt->color;
3763
3764                 if (1)
3765                         n_ifs[id] = 1;
3766         }
3767
3768         /*
3769          * The FW's scheduling session consists of
3770          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3771          * equally between all the bindings that require quota
3772          */
3773         num_active_macs = 0;
3774         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3775                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3776                 num_active_macs += n_ifs[i];
3777         }
3778
3779         quota = 0;
3780         quota_rem = 0;
3781         if (num_active_macs) {
3782                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3783                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3784         }
3785
3786         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3787                 if (colors[i] < 0)
3788                         continue;
3789
3790                 cmd.quotas[idx].id_and_color =
3791                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3792
3793                 if (n_ifs[i] <= 0) {
3794                         cmd.quotas[idx].quota = htole32(0);
3795                         cmd.quotas[idx].max_duration = htole32(0);
3796                 } else {
3797                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3798                         cmd.quotas[idx].max_duration = htole32(0);
3799                 }
3800                 idx++;
3801         }
3802
3803         /* Give the remainder of the session to the first binding */
3804         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3805
3806         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3807             sizeof(cmd), &cmd);
3808         if (ret)
3809                 device_printf(sc->sc_dev,
3810                     "%s: Failed to send quota: %d\n", __func__, ret);
3811         return ret;
3812 }
3813
3814 /*
3815  * ieee80211 routines
3816  */
3817
3818 /*
3819  * Change to AUTH state in 80211 state machine.  Roughly matches what
3820  * Linux does in bss_info_changed().
3821  */
3822 static int
3823 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3824 {
3825         struct ieee80211_node *ni;
3826         struct iwm_node *in;
3827         struct iwm_vap *iv = IWM_VAP(vap);
3828         uint32_t duration;
3829         int error;
3830
3831         /*
3832          * XXX i have a feeling that the vap node is being
3833          * freed from underneath us. Grr.
3834          */
3835         ni = ieee80211_ref_node(vap->iv_bss);
3836         in = IWM_NODE(ni);
3837         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3838             "%s: called; vap=%p, bss ni=%p\n",
3839             __func__,
3840             vap,
3841             ni);
3842
3843         in->in_assoc = 0;
3844
3845         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3846         if (error != 0)
3847                 return error;
3848
3849         error = iwm_allow_mcast(vap, sc);
3850         if (error) {
3851                 device_printf(sc->sc_dev,
3852                     "%s: failed to set multicast\n", __func__);
3853                 goto out;
3854         }
3855
3856         /*
3857          * This is where it deviates from what Linux does.
3858          *
3859          * Linux iwlwifi doesn't reset the nic each time, nor does it
3860          * call ctxt_add() here.  Instead, it adds it during vap creation,
3861          * and always does a mac_ctx_changed().
3862          *
3863          * The openbsd port doesn't attempt to do that - it reset things
3864          * at odd states and does the add here.
3865          *
3866          * So, until the state handling is fixed (ie, we never reset
3867          * the NIC except for a firmware failure, which should drag
3868          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3869          * contexts that are required), let's do a dirty hack here.
3870          */
3871         if (iv->is_uploaded) {
3872                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3873                         device_printf(sc->sc_dev,
3874                             "%s: failed to update MAC\n", __func__);
3875                         goto out;
3876                 }
3877                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3878                     in->in_ni.ni_chan, 1, 1)) != 0) {
3879                         device_printf(sc->sc_dev,
3880                             "%s: failed update phy ctxt\n", __func__);
3881                         goto out;
3882                 }
3883                 in->in_phyctxt = &sc->sc_phyctxt[0];
3884
3885                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3886                         device_printf(sc->sc_dev,
3887                             "%s: binding update cmd\n", __func__);
3888                         goto out;
3889                 }
3890                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3891                         device_printf(sc->sc_dev,
3892                             "%s: failed to update sta\n", __func__);
3893                         goto out;
3894                 }
3895         } else {
3896                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3897                         device_printf(sc->sc_dev,
3898                             "%s: failed to add MAC\n", __func__);
3899                         goto out;
3900                 }
3901                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3902                     in->in_ni.ni_chan, 1, 1)) != 0) {
3903                         device_printf(sc->sc_dev,
3904                             "%s: failed add phy ctxt!\n", __func__);
3905                         error = ETIMEDOUT;
3906                         goto out;
3907                 }
3908                 in->in_phyctxt = &sc->sc_phyctxt[0];
3909
3910                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3911                         device_printf(sc->sc_dev,
3912                             "%s: binding add cmd\n", __func__);
3913                         goto out;
3914                 }
3915                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3916                         device_printf(sc->sc_dev,
3917                             "%s: failed to add sta\n", __func__);
3918                         goto out;
3919                 }
3920         }
3921
3922         /*
3923          * Prevent the FW from wandering off channel during association
3924          * by "protecting" the session with a time event.
3925          */
3926         /* XXX duration is in units of TU, not MS */
3927         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3928         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3929         DELAY(100);
3930
3931         error = 0;
3932 out:
3933         ieee80211_free_node(ni);
3934         return (error);
3935 }
3936
3937 static int
3938 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3939 {
3940         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3941         int error;
3942
3943         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3944                 device_printf(sc->sc_dev,
3945                     "%s: failed to update STA\n", __func__);
3946                 return error;
3947         }
3948
3949         in->in_assoc = 1;
3950         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3951                 device_printf(sc->sc_dev,
3952                     "%s: failed to update MAC\n", __func__);
3953                 return error;
3954         }
3955
3956         return 0;
3957 }
3958
3959 static int
3960 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3961 {
3962         /*
3963          * Ok, so *technically* the proper set of calls for going
3964          * from RUN back to SCAN is:
3965          *
3966          * iwm_mvm_power_mac_disable(sc, in);
3967          * iwm_mvm_mac_ctxt_changed(sc, in);
3968          * iwm_mvm_rm_sta(sc, in);
3969          * iwm_mvm_update_quotas(sc, NULL);
3970          * iwm_mvm_mac_ctxt_changed(sc, in);
3971          * iwm_mvm_binding_remove_vif(sc, in);
3972          * iwm_mvm_mac_ctxt_remove(sc, in);
3973          *
3974          * However, that freezes the device not matter which permutations
3975          * and modifications are attempted.  Obviously, this driver is missing
3976          * something since it works in the Linux driver, but figuring out what
3977          * is missing is a little more complicated.  Now, since we're going
3978          * back to nothing anyway, we'll just do a complete device reset.
3979          * Up your's, device!
3980          */
3981         /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
3982         iwm_stop_device(sc);
3983         iwm_init_hw(sc);
3984         if (in)
3985                 in->in_assoc = 0;
3986         return 0;
3987
3988 #if 0
3989         int error;
3990
3991         iwm_mvm_power_mac_disable(sc, in);
3992
3993         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3994                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3995                 return error;
3996         }
3997
3998         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3999                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4000                 return error;
4001         }
4002         error = iwm_mvm_rm_sta(sc, in);
4003         in->in_assoc = 0;
4004         iwm_mvm_update_quotas(sc, NULL);
4005         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4006                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4007                 return error;
4008         }
4009         iwm_mvm_binding_remove_vif(sc, in);
4010
4011         iwm_mvm_mac_ctxt_remove(sc, in);
4012
4013         return error;
4014 #endif
4015 }
4016
4017 static struct ieee80211_node *
4018 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4019 {
4020         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4021             M_INTWAIT | M_ZERO);
4022 }
4023
/*
 * Build the firmware link-quality (rate selection) command for node 'in'
 * from its negotiated legacy rate set.  Fills in->in_ridx[] (802.11 rate
 * -> HW rate index map, highest rate first) and in->in_lq; the caller is
 * responsible for actually sending the IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out rather than overflow the firmware rate table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas for each entry. */
		if (txant == 0)
			txant = iwm_fw_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		/*
		 * NOTE(review): if a rate was not found in the mapping loop
		 * above, in_ridx[i] still holds the memset value (-1/0xff)
		 * and iwm_rates[ridx] below would be indexed out of bounds.
		 * TODO: confirm and add a bounds guard.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		/* CCK rates additionally need the CCK flag bit. */
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4139
4140 static int
4141 iwm_media_change(struct ifnet *ifp)
4142 {
4143         struct ieee80211vap *vap = ifp->if_softc;
4144         struct ieee80211com *ic = vap->iv_ic;
4145         struct iwm_softc *sc = ic->ic_softc;
4146         int error;
4147
4148         error = ieee80211_media_change(ifp);
4149         if (error != ENETRESET)
4150                 return error;
4151
4152         IWM_LOCK(sc);
4153         if (ic->ic_nrunning > 0) {
4154                 iwm_stop(sc);
4155                 iwm_init(sc);
4156         }
4157         IWM_UNLOCK(sc);
4158         return error;
4159 }
4160
4161
/*
 * net80211 state-machine hook.  Drives the firmware through the
 * INIT/SCAN/AUTH/ASSOC/RUN transitions, then chains to the saved
 * net80211 newstate handler.  Called with the IEEE80211 lock held;
 * we drop it and take the driver lock for the firmware work.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	/* Swap locks: net80211 lock out, driver lock in. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Swap locks back for the recursive newstate call. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas, then rate selection. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Send the link-quality command built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	/* Restore the lock order expected by net80211. */
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4289
4290 void
4291 iwm_endscan_cb(void *arg, int pending)
4292 {
4293         struct iwm_softc *sc = arg;
4294         struct ieee80211com *ic = &sc->sc_ic;
4295
4296         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4297             "%s: scan ended\n",
4298             __func__);
4299
4300         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4301 }
4302
4303 /*
4304  * Aging and idle timeouts for the different possible scenarios
4305  * in default configuration
4306  */
4307 static const uint32_t
4308 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4309         {
4310                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4311                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4312         },
4313         {
4314                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4315                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4316         },
4317         {
4318                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4319                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4320         },
4321         {
4322                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4323                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4324         },
4325         {
4326                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4327                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4328         },
4329 };
4330
4331 /*
4332  * Aging and idle timeouts for the different possible scenarios
4333  * in single BSS MAC configuration.
4334  */
4335 static const uint32_t
4336 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4337         {
4338                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4339                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4340         },
4341         {
4342                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4343                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4344         },
4345         {
4346                 htole32(IWM_SF_MCAST_AGING_TIMER),
4347                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4348         },
4349         {
4350                 htole32(IWM_SF_BA_AGING_TIMER),
4351                 htole32(IWM_SF_BA_IDLE_TIMER)
4352         },
4353         {
4354                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4355                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4356         },
4357 };
4358
4359 static void
4360 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4361     struct ieee80211_node *ni)
4362 {
4363         int i, j, watermark;
4364
4365         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4366
4367         /*
4368          * If we are in association flow - check antenna configuration
4369          * capabilities of the AP station, and choose the watermark accordingly.
4370          */
4371         if (ni) {
4372                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4373 #ifdef notyet
4374                         if (ni->ni_rxmcs[2] != 0)
4375                                 watermark = IWM_SF_W_MARK_MIMO3;
4376                         else if (ni->ni_rxmcs[1] != 0)
4377                                 watermark = IWM_SF_W_MARK_MIMO2;
4378                         else
4379 #endif
4380                                 watermark = IWM_SF_W_MARK_SISO;
4381                 } else {
4382                         watermark = IWM_SF_W_MARK_LEGACY;
4383                 }
4384         /* default watermark value for unassociated mode. */
4385         } else {
4386                 watermark = IWM_SF_W_MARK_MIMO2;
4387         }
4388         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4389
4390         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4391                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4392                         sf_cmd->long_delay_timeouts[i][j] =
4393                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4394                 }
4395         }
4396
4397         if (ni) {
4398                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4399                        sizeof(iwm_sf_full_timeout));
4400         } else {
4401                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4402                        sizeof(iwm_sf_full_timeout_def));
4403         }
4404 }
4405
4406 static int
4407 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4408 {
4409         struct ieee80211com *ic = &sc->sc_ic;
4410         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4411         struct iwm_sf_cfg_cmd sf_cmd = {
4412                 .state = htole32(IWM_SF_FULL_ON),
4413         };
4414         int ret = 0;
4415
4416         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4417                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4418
4419         switch (new_state) {
4420         case IWM_SF_UNINIT:
4421         case IWM_SF_INIT_OFF:
4422                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4423                 break;
4424         case IWM_SF_FULL_ON:
4425                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4426                 break;
4427         default:
4428                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4429                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4430                           new_state);
4431                 return EINVAL;
4432         }
4433
4434         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4435                                    sizeof(sf_cmd), &sf_cmd);
4436         return ret;
4437 }
4438
4439 static int
4440 iwm_send_bt_init_conf(struct iwm_softc *sc)
4441 {
4442         struct iwm_bt_coex_cmd bt_cmd;
4443
4444         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4445         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4446
4447         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4448             &bt_cmd);
4449 }
4450
4451 static int
4452 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4453 {
4454         struct iwm_mcc_update_cmd mcc_cmd;
4455         struct iwm_host_cmd hcmd = {
4456                 .id = IWM_MCC_UPDATE_CMD,
4457                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4458                 .data = { &mcc_cmd },
4459         };
4460         int ret;
4461 #ifdef IWM_DEBUG
4462         struct iwm_rx_packet *pkt;
4463         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4464         struct iwm_mcc_update_resp *mcc_resp;
4465         int n_channels;
4466         uint16_t mcc;
4467 #endif
4468         int resp_v2 = isset(sc->sc_enabled_capa,
4469             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4470
4471         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4472         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4473         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4474             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4475                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4476         else
4477                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4478
4479         if (resp_v2)
4480                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4481         else
4482                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4483
4484         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4485             "send MCC update to FW with '%c%c' src = %d\n",
4486             alpha2[0], alpha2[1], mcc_cmd.source_id);
4487
4488         ret = iwm_send_cmd(sc, &hcmd);
4489         if (ret)
4490                 return ret;
4491
4492 #ifdef IWM_DEBUG
4493         pkt = hcmd.resp_pkt;
4494
4495         /* Extract MCC response */
4496         if (resp_v2) {
4497                 mcc_resp = (void *)pkt->data;
4498                 mcc = mcc_resp->mcc;
4499                 n_channels =  le32toh(mcc_resp->n_channels);
4500         } else {
4501                 mcc_resp_v1 = (void *)pkt->data;
4502                 mcc = mcc_resp_v1->mcc;
4503                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4504         }
4505
4506         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4507         if (mcc == 0)
4508                 mcc = 0x3030;  /* "00" - world */
4509
4510         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4511             "regulatory domain '%c%c' (%d channels available)\n",
4512             mcc >> 8, mcc & 0xff, n_channels);
4513 #endif
4514         iwm_free_resp(sc, &hcmd);
4515
4516         return 0;
4517 }
4518
4519 static void
4520 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4521 {
4522         struct iwm_host_cmd cmd = {
4523                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4524                 .len = { sizeof(uint32_t), },
4525                 .data = { &backoff, },
4526         };
4527
4528         if (iwm_send_cmd(sc, &cmd) != 0) {
4529                 device_printf(sc->sc_dev,
4530                     "failed to change thermal tx backoff\n");
4531         }
4532 }
4533
/*
 * Bring the device from cold to operational: start the hardware, run
 * the INIT ucode (calibration), restart, load the regular ucode, then
 * push the initial firmware configuration (BT coex, antennas, PHY,
 * aux station, PHY contexts, power, MCC, scan config, TX queues).
 * The order of these steps is mandated by the firmware; on any failure
 * the device is stopped and the error returned.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image for calibration data. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* With LAR support, seed the regulatory domain ("ZZ" = world). */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	/* UMAC-scan-capable firmware needs its scan config up front. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4642
4643 /* Allow multicast from our BSSID. */
4644 static int
4645 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4646 {
4647         struct ieee80211_node *ni = vap->iv_bss;
4648         struct iwm_mcast_filter_cmd *cmd;
4649         size_t size;
4650         int error;
4651
4652         size = roundup(sizeof(*cmd), 4);
4653         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4654         if (cmd == NULL)
4655                 return ENOMEM;
4656         cmd->filter_own = 1;
4657         cmd->port_id = 0;
4658         cmd->count = 0;
4659         cmd->pass_all = 1;
4660         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4661
4662         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4663             IWM_CMD_SYNC, size, cmd);
4664         kfree(cmd, M_DEVBUF);
4665
4666         return (error);
4667 }
4668
4669 /*
4670  * ifnet interfaces
4671  */
4672
4673 static void
4674 iwm_init(struct iwm_softc *sc)
4675 {
4676         int error;
4677
4678         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4679                 return;
4680         }
4681         sc->sc_generation++;
4682         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4683
4684         if ((error = iwm_init_hw(sc)) != 0) {
4685                 kprintf("iwm_init_hw failed %d\n", error);
4686                 iwm_stop(sc);
4687                 return;
4688         }
4689
4690         /*
4691          * Ok, firmware loaded and we are jogging
4692          */
4693         sc->sc_flags |= IWM_FLAG_HW_INITED;
4694         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4695 }
4696
4697 static int
4698 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4699 {
4700         struct iwm_softc *sc;
4701         int error;
4702
4703         sc = ic->ic_softc;
4704
4705         IWM_LOCK(sc);
4706         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4707                 IWM_UNLOCK(sc);
4708                 return (ENXIO);
4709         }
4710         error = mbufq_enqueue(&sc->sc_snd, m);
4711         if (error) {
4712                 IWM_UNLOCK(sc);
4713                 return (error);
4714         }
4715         iwm_start(sc);
4716         IWM_UNLOCK(sc);
4717         return (0);
4718 }
4719
4720 /*
4721  * Dequeue packets from sendq and call send.
4722  */
4723 static void
4724 iwm_start(struct iwm_softc *sc)
4725 {
4726         struct ieee80211_node *ni;
4727         struct mbuf *m;
4728         int ac = 0;
4729
4730         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4731         while (sc->qfullmsk == 0 &&
4732                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4733                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4734                 if (iwm_tx(sc, m, ni, ac) != 0) {
4735                         if_inc_counter(ni->ni_vap->iv_ifp,
4736                             IFCOUNTER_OERRORS, 1);
4737                         ieee80211_free_node(ni);
4738                         continue;
4739                 }
4740                 sc->sc_tx_timer = 15;
4741         }
4742         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4743 }
4744
/*
 * Bring the interface down: clear the inited flag, mark stopped, bump
 * the generation counter (invalidates in-flight work), stop the LED
 * blink and TX watchdog, then halt the device itself.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
4756
/*
 * Once-a-second watchdog callout.  sc_tx_timer is armed (to 15) by
 * iwm_start() whenever a frame is handed to the hardware; if it counts
 * down to zero the device is considered wedged: dump the firmware error
 * log (debug kernels), stop the device, and bump the output-error
 * counter.  The callout is intentionally NOT rescheduled after a
 * timeout — iwm_stop() has taken the interface down.
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			iwm_stop(sc);
#if defined(__DragonFly__)
			/* DragonFly keeps ic_oerrors as a plain counter. */
			++sc->sc_ic.ic_oerrors;
#else
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
#endif
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
4779
4780 static void
4781 iwm_parent(struct ieee80211com *ic)
4782 {
4783         struct iwm_softc *sc = ic->ic_softc;
4784         int startall = 0;
4785
4786         IWM_LOCK(sc);
4787         if (ic->ic_nrunning > 0) {
4788                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4789                         iwm_init(sc);
4790                         startall = 1;
4791                 }
4792         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4793                 iwm_stop(sc);
4794         IWM_UNLOCK(sc);
4795         if (startall)
4796                 ieee80211_start_all(ic);
4797 }
4798
4799 /*
4800  * The interrupt side of things
4801  */
4802
4803 /*
4804  * error dumping routines are from iwlwifi/mvm/utils.c
4805  */
4806
4807 /*
4808  * Note: This structure is read from the device with IO accesses,
4809  * and the reading already does the endian conversion. As it is
4810  * read with uint32_t-sized accesses, any members with a different size
4811  * need to be ordered correctly though!
4812  */
/*
 * LMAC firmware error log, version 3 (LOG_ERROR_TABLE_API_S_VER_3).
 * Read out of device memory by iwm_nic_error() via iwm_read_mem(); the
 * read path performs the endian conversion, so no le32toh() is applied
 * to these fields afterwards.  Field order must match the firmware
 * layout exactly — do not reorder or resize members.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4860
4861 /*
4862  * UMAC error struct - relevant starting from family 8000 chip.
4863  * Note: This structure is read from the device with IO accesses,
4864  * and the reading already does the endian conversion. As it is
4865  * read with u32-sized accesses, any members with a different size
4866  * need to be ordered correctly though!
4867  */
/*
 * UMAC firmware error log — relevant starting from family 8000 chips.
 * Read out of device memory by iwm_nic_umac_error() via iwm_read_mem();
 * the read path performs the endian conversion.  Field order must match
 * the firmware layout exactly — do not reorder or resize members.
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
4885
/*
 * Layout constants for the firmware error log, used in the validity
 * checks in iwm_nic_error()/iwm_nic_umac_error(): one leading dword
 * (the 'valid' count) followed by 7-dword records — presumably
 * mirroring the iwlwifi layout; confirm against the Linux driver.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4888
4889 #ifdef IWM_DEBUG
/*
 * Symbolic names for firmware SYSASSERT error codes, consumed by
 * iwm_desc_lookup().  The terminating "ADVANCED_SYSASSERT" entry (num 0)
 * is the fallback for unknown codes.
 *
 * Declared static const: the table is read-only and private to this
 * file; the old definition was a writable global with external linkage,
 * which pollutes the kernel namespace for no reason.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
4911
4912 static const char *
4913 iwm_desc_lookup(uint32_t num)
4914 {
4915         int i;
4916
4917         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4918                 if (advanced_lookup[i].num == num)
4919                         return advanced_lookup[i].name;
4920
4921         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4922         return advanced_lookup[i].name;
4923 }
4924
/*
 * Dump the UMAC (family 8000+) firmware error log to the console.  The
 * table address was reported by the firmware in its alive response and
 * stashed in sc_uc.uc_umac_error_event_table; see iwm_notif_intr().
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity-check the pointer the firmware handed us. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of dwords, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
4971
4972 /*
4973  * Support for dumping the error log seemed like a good idea ...
4974  * but it's mostly hex junk and the only sensible thing is the
4975  * hw/ucode revision (which we know anyway).  Since it's here,
4976  * I'll just leave it in, just in case e.g. the Intel guys want to
4977  * help us decipher some "ADVANCED_SYSASSERT" later.
4978  */
/*
 * Dump the LMAC firmware error log to the console, then chain to the
 * UMAC log (family 8000+) when the firmware reported one.  Called from
 * the SW-error interrupt path and the TX watchdog (IWM_DEBUG kernels).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check the pointer the firmware handed us. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of dwords, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* UMAC table is only advertised by family 8000+ firmware. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5051 #endif
5052
/*
 * Sync the current RX descriptor's DMA map for CPU access and point
 * _var_/_ptr_ at the payload immediately following the packet header.
 * These expand in iwm_notif_intr() where 'ring' and 'data' are in scope.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * Wrapped in do/while(0) and with the argument parenthesized: the old
 * definition carried a trailing ';', so every `ADVANCE_RXQ(sc);` call
 * expanded to an extra empty statement and would have broken inside an
 * unbraced if/else.
 */
#define ADVANCE_RXQ(sc) do {						\
	(sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT;	\
} while (/*CONSTCOND*/0)
5066
5067 /*
5068  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5069  * Basic structure from if_iwn
5070  */
/*
 * Drain the RX ring: process every packet the firmware has completed
 * since our last pass (up to the closed_rb_num index in the shared
 * status area), dispatch on the notification/command code, wake any
 * synchronous waiters, and finally hand the processed buffers back to
 * the firmware by updating the channel-0 write pointer.  Runs with the
 * IWM lock held (it drops/retakes it only around ieee80211_beacon_miss).
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Index of the most recent RX buffer the firmware closed (12 bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		/* 'ring' and 'data' are used by the SYNC_RESP_* macros. */
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid flags firmware-originated packets; see below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE: {
			/*
			 * The alive response's layout depends on the firmware
			 * API version; distinguish the three known versions
			 * by payload length.  v2/v3 additionally carry the
			 * UMAC error table pointer.
			 */
			struct iwm_mvm_alive_resp_v1 *resp1;
			struct iwm_mvm_alive_resp_v2 *resp2;
			struct iwm_mvm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping on the alive response. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Copy the response for the synchronous waiter. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Two-letter country code, big-endian in notif->mcc. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		/*
		 * Plain command acknowledgements: stash the response if a
		 * synchronous sender is waiting for this qid/idx pair.
		 */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			break; }

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Defer scan-end processing to the taskqueue. */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
#if 0	/* XXX This would be a duplicate scan end call */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
#endif
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5406
5407 static void
5408 iwm_intr(void *arg)
5409 {
5410         struct iwm_softc *sc = arg;
5411         int handled = 0;
5412         int r1, r2, rv = 0;
5413         int isperiodic = 0;
5414
5415 #if defined(__DragonFly__)
5416         if (sc->sc_mem == NULL) {
5417                 kprintf("iwm_intr: detached\n");
5418                 return;
5419         }
5420 #endif
5421         IWM_LOCK(sc);
5422         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5423
5424         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5425                 uint32_t *ict = sc->ict_dma.vaddr;
5426                 int tmp;
5427
5428                 tmp = htole32(ict[sc->ict_cur]);
5429                 if (!tmp)
5430                         goto out_ena;
5431
5432                 /*
5433                  * ok, there was something.  keep plowing until we have all.
5434                  */
5435                 r1 = r2 = 0;
5436                 while (tmp) {
5437                         r1 |= tmp;
5438                         ict[sc->ict_cur] = 0;
5439                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5440                         tmp = htole32(ict[sc->ict_cur]);
5441                 }
5442
5443                 /* this is where the fun begins.  don't ask */
5444                 if (r1 == 0xffffffff)
5445                         r1 = 0;
5446
5447                 /* i am not expected to understand this */
5448                 if (r1 & 0xc0000)
5449                         r1 |= 0x8000;
5450                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5451         } else {
5452                 r1 = IWM_READ(sc, IWM_CSR_INT);
5453                 /* "hardware gone" (where, fishing?) */
5454                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5455                         goto out;
5456                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5457         }
5458         if (r1 == 0 && r2 == 0) {
5459                 goto out_ena;
5460         }
5461
5462         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5463
5464         /* ignored */
5465         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5466
5467         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5468                 int i;
5469                 struct ieee80211com *ic = &sc->sc_ic;
5470                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5471
5472 #ifdef IWM_DEBUG
5473                 iwm_nic_error(sc);
5474 #endif
5475                 /* Dump driver status (TX and RX rings) while we're here. */
5476                 device_printf(sc->sc_dev, "driver status:\n");
5477                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5478                         struct iwm_tx_ring *ring = &sc->txq[i];
5479                         device_printf(sc->sc_dev,
5480                             "  tx ring %2d: qid=%-2d cur=%-3d "
5481                             "queued=%-3d\n",
5482                             i, ring->qid, ring->cur, ring->queued);
5483                 }
5484                 device_printf(sc->sc_dev,
5485                     "  rx ring: cur=%d\n", sc->rxq.cur);
5486                 device_printf(sc->sc_dev,
5487                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5488
5489                 /* Don't stop the device; just do a VAP restart */
5490                 IWM_UNLOCK(sc);
5491
5492                 if (vap == NULL) {
5493                         kprintf("%s: null vap\n", __func__);
5494                         return;
5495                 }
5496
5497                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5498                     "restarting\n", __func__, vap->iv_state);
5499
5500                 /* XXX TODO: turn this into a callout/taskqueue */
5501                 ieee80211_restart_all(ic);
5502                 return;
5503         }
5504
5505         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5506                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5507                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5508                 iwm_stop(sc);
5509                 rv = 1;
5510                 goto out;
5511         }
5512
5513         /* firmware chunk loaded */
5514         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5515                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5516                 handled |= IWM_CSR_INT_BIT_FH_TX;
5517                 sc->sc_fw_chunk_done = 1;
5518                 wakeup(&sc->sc_fw);
5519         }
5520
5521         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5522                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5523                 if (iwm_check_rfkill(sc)) {
5524                         device_printf(sc->sc_dev,
5525                             "%s: rfkill switch, disabling interface\n",
5526                             __func__);
5527                         iwm_stop(sc);
5528                 }
5529         }
5530
5531         /*
5532          * The Linux driver uses periodic interrupts to avoid races.
5533          * We cargo-cult like it's going out of fashion.
5534          */
5535         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5536                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5537                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5538                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5539                         IWM_WRITE_1(sc,
5540                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5541                 isperiodic = 1;
5542         }
5543
5544         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5545                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5546                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5547
5548                 iwm_notif_intr(sc);
5549
5550                 /* enable periodic interrupt, see above */
5551                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5552                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5553                             IWM_CSR_INT_PERIODIC_ENA);
5554         }
5555
5556         if (__predict_false(r1 & ~handled))
5557                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5558                     "%s: unhandled interrupts: %x\n", __func__, r1);
5559         rv = 1;
5560
5561  out_ena:
5562         iwm_restore_interrupts(sc);
5563  out:
5564         IWM_UNLOCK(sc);
5565         return;
5566 }
5567
5568 /*
5569  * Autoconf glue-sniffing
5570  */
5571 #define PCI_VENDOR_INTEL                0x8086
5572 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5573 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5574 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5575 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5576 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5577 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5578 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5579 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5580 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5581 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5582
/*
 * Device-ID to marketing-name match table, consumed by iwm_probe()
 * to recognize supported adapters and set the device description.
 */
static const struct iwm_devices {
        uint16_t        device;         /* PCI device ID (vendor is Intel) */
        const char      *name;          /* human-readable description */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
        { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
        { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
        { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
        { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
        { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5598
5599 static int
5600 iwm_probe(device_t dev)
5601 {
5602         int i;
5603
5604         for (i = 0; i < nitems(iwm_devices); i++) {
5605                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5606                     pci_get_device(dev) == iwm_devices[i].device) {
5607                         device_set_desc(dev, iwm_devices[i].name);
5608                         return (BUS_PROBE_DEFAULT);
5609                 }
5610         }
5611
5612         return (ENXIO);
5613 }
5614
5615 static int
5616 iwm_dev_check(device_t dev)
5617 {
5618         struct iwm_softc *sc;
5619
5620         sc = device_get_softc(dev);
5621
5622         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5623         switch (pci_get_device(dev)) {
5624         case PCI_PRODUCT_INTEL_WL_3160_1:
5625         case PCI_PRODUCT_INTEL_WL_3160_2:
5626                 sc->sc_fwname = "iwm3160fw";
5627                 sc->host_interrupt_operation_mode = 1;
5628                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5629                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5630                 return (0);
5631         case PCI_PRODUCT_INTEL_WL_3165_1:
5632         case PCI_PRODUCT_INTEL_WL_3165_2:
5633                 sc->sc_fwname = "iwm7265fw";
5634                 sc->host_interrupt_operation_mode = 0;
5635                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5636                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5637                 return (0);
5638         case PCI_PRODUCT_INTEL_WL_7260_1:
5639         case PCI_PRODUCT_INTEL_WL_7260_2:
5640                 sc->sc_fwname = "iwm7260fw";
5641                 sc->host_interrupt_operation_mode = 1;
5642                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5643                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5644                 return (0);
5645         case PCI_PRODUCT_INTEL_WL_7265_1:
5646         case PCI_PRODUCT_INTEL_WL_7265_2:
5647                 sc->sc_fwname = "iwm7265fw";
5648                 sc->host_interrupt_operation_mode = 0;
5649                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5650                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5651                 return (0);
5652         case PCI_PRODUCT_INTEL_WL_8260_1:
5653         case PCI_PRODUCT_INTEL_WL_8260_2:
5654                 sc->sc_fwname = "iwm8000Cfw";
5655                 sc->host_interrupt_operation_mode = 0;
5656                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5657                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5658                 return (0);
5659         default:
5660                 device_printf(dev, "unknown adapter type\n");
5661                 return ENXIO;
5662         }
5663 }
5664
/*
 * PCI-level attach: program config-space workarounds, map BAR0
 * registers, allocate the (MSI if possible) interrupt and install
 * iwm_intr as the handler, and fetch the parent DMA tag.
 *
 * On failure the already-acquired resources are left in the softc;
 * the caller's fail path (iwm_detach_local -> iwm_pci_detach)
 * releases them.
 */
static int
iwm_pci_attach(device_t dev)
{
        struct iwm_softc *sc;
        int count, error, rid;
        uint16_t reg;
#if defined(__DragonFly__)
        int irq_flags;
#endif

        sc = device_get_softc(dev);

        /* Clear device-specific "PCI retry timeout" register (41h). */
        reg = pci_read_config(dev, 0x40, sizeof(reg));
        pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));

        /* Enable bus-mastering and hardware bug workaround. */
        pci_enable_busmaster(dev);
        reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
        /* if !MSI */
        if (reg & PCIM_STATUS_INTxSTATE) {
                reg &= ~PCIM_STATUS_INTxSTATE;
        }
        pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

        /* Map the device registers (BAR0). */
        rid = PCIR_BAR(0);
        sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->sc_mem == NULL) {
                device_printf(sc->sc_dev, "can't map mem space\n");
                return (ENXIO);
        }
        sc->sc_st = rman_get_bustag(sc->sc_mem);
        sc->sc_sh = rman_get_bushandle(sc->sc_mem);

        /* Install interrupt handler. */
        count = 1;
        rid = 0;
#if defined(__DragonFly__)
        pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
        /* Prefer MSI (rid 1); fall back to a shareable legacy IRQ (rid 0). */
        if (pci_alloc_msi(dev, &count) == 0)
                rid = 1;
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
            (rid != 0 ? 0 : RF_SHAREABLE));
#endif
        if (sc->sc_irq == NULL) {
                device_printf(dev, "can't map interrupt\n");
                        return (ENXIO);
        }
#if defined(__DragonFly__)
        error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
                               iwm_intr, sc, &sc->sc_ih,
                               &wlan_global_serializer);
#else
        error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, iwm_intr, sc, &sc->sc_ih);
#endif
        /*
         * NOTE(review): 'error' from bus_setup_intr() is never checked;
         * failure is detected via sc_ih == NULL instead.  Confirm both
         * platforms guarantee sc_ih stays NULL on error.
         */
        if (sc->sc_ih == NULL) {
                device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
                pci_release_msi(dev);
#endif
                        return (ENXIO);
        }
        sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

        return (0);
}
5735
5736 static void
5737 iwm_pci_detach(device_t dev)
5738 {
5739         struct iwm_softc *sc = device_get_softc(dev);
5740
5741         if (sc->sc_irq != NULL) {
5742                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5743                 bus_release_resource(dev, SYS_RES_IRQ,
5744                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5745                 pci_release_msi(dev);
5746 #if defined(__DragonFly__)
5747                 sc->sc_irq = NULL;
5748 #endif
5749         }
5750         if (sc->sc_mem != NULL) {
5751                 bus_release_resource(dev, SYS_RES_MEMORY,
5752                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5753 #if defined(__DragonFly__)
5754                 sc->sc_mem = NULL;
5755 #endif
5756         }
5757 }
5758
5759
5760
/*
 * Device attach: initialize locks, callouts and the driver taskqueue,
 * attach at the PCI level, identify the chip, allocate all DMA
 * resources (firmware area, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings) and register a config intrhook so firmware load and
 * net80211 attach (iwm_preinit) run once interrupts are available.
 *
 * Returns 0 on success; on any failure, tears down via
 * iwm_detach_local() and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
        callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
        callout_init(&sc->sc_led_blink_to);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
        sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
        error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
                                        -1, "iwm_taskq");
#else
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
                    error);
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* -1 marks "no synchronous command response pending". */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        /*
         * We now start fiddling with the hardware
         */
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        /* 0x3 identifies silicon C step; fold into hw_rev. */
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Reset all PHY contexts to an unused, unreferenced state. */
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
        /* Defer firmware load/net80211 attach until interrupts work. */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        sc->sc_preinit_hook.ich_desc = "iwm";
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
5953
5954 static int
5955 iwm_is_valid_ether_addr(uint8_t *addr)
5956 {
5957         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5958
5959         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5960                 return (FALSE);
5961
5962         return (TRUE);
5963 }
5964
5965 static int
5966 iwm_update_edca(struct ieee80211com *ic)
5967 {
5968         struct iwm_softc *sc = ic->ic_softc;
5969
5970         device_printf(sc->sc_dev, "%s: called\n", __func__);
5971         return (0);
5972 }
5973
/*
 * Deferred attach, run from the config intrhook established in
 * iwm_attach() once interrupts are enabled: start the hardware, run
 * the init firmware once to read NVM/calibration data, then attach
 * to net80211, install the driver's ic_* method overrides and attach
 * radiotap.  On failure, tears the driver down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = &sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* One-shot init firmware run to populate NVM data; then stop. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "hw rev 0x%x, fw ver %s, address %s\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

        /* not all hardware can do 5GHz band */
        if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
            ic->ic_channels);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        ieee80211_ifattach(ic);
        /* Install driver overrides for the net80211 methods. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_getradiocaps = iwm_init_channel_map;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_update_edca;
        ic->ic_parent = iwm_parent;
        ic->ic_transmit = iwm_transmit;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
6044
6045 /*
6046  * Attach the interface to 802.11 radiotap.
6047  */
6048 static void
6049 iwm_radiotap_attach(struct iwm_softc *sc)
6050 {
6051         struct ieee80211com *ic = &sc->sc_ic;
6052
6053         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6054             "->%s begin\n", __func__);
6055         ieee80211_radiotap_attach(ic,
6056             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6057                 IWM_TX_RADIOTAP_PRESENT,
6058             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6059                 IWM_RX_RADIOTAP_PRESENT);
6060         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6061             "->%s end\n", __func__);
6062 }
6063
6064 static struct ieee80211vap *
6065 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6066     enum ieee80211_opmode opmode, int flags,
6067     const uint8_t bssid[IEEE80211_ADDR_LEN],
6068     const uint8_t mac[IEEE80211_ADDR_LEN])
6069 {
6070         struct iwm_vap *ivp;
6071         struct ieee80211vap *vap;
6072
6073         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6074                 return NULL;
6075         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6076         vap = &ivp->iv_vap;
6077         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6078         vap->iv_bmissthreshold = 10;            /* override default */
6079         /* Override with driver methods. */
6080         ivp->iv_newstate = vap->iv_newstate;
6081         vap->iv_newstate = iwm_newstate;
6082
6083         ieee80211_ratectl_init(vap);
6084         /* Complete setup. */
6085         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6086             mac);
6087         ic->ic_opmode = opmode;
6088
6089         return vap;
6090 }
6091
6092 static void
6093 iwm_vap_delete(struct ieee80211vap *vap)
6094 {
6095         struct iwm_vap *ivp = IWM_VAP(vap);
6096
6097         ieee80211_ratectl_deinit(vap);
6098         ieee80211_vap_detach(vap);
6099         kfree(ivp, M_80211_VAP);
6100 }
6101
6102 static void
6103 iwm_scan_start(struct ieee80211com *ic)
6104 {
6105         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6106         struct iwm_softc *sc = ic->ic_softc;
6107         int error;
6108
6109         IWM_LOCK(sc);
6110         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6111                 error = iwm_mvm_umac_scan(sc);
6112         else
6113                 error = iwm_mvm_lmac_scan(sc);
6114         if (error != 0) {
6115                 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
6116                 IWM_UNLOCK(sc);
6117                 ieee80211_cancel_scan(vap);
6118         } else {
6119                 iwm_led_blink_start(sc);
6120                 IWM_UNLOCK(sc);
6121         }
6122 }
6123
6124 static void
6125 iwm_scan_end(struct ieee80211com *ic)
6126 {
6127         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6128         struct iwm_softc *sc = ic->ic_softc;
6129
6130         IWM_LOCK(sc);
6131         iwm_led_blink_stop(sc);
6132         if (vap->iv_state == IEEE80211_S_RUN)
6133                 iwm_mvm_led_enable(sc);
6134         IWM_UNLOCK(sc);
6135 }
6136
/* net80211 multicast-filter update hook; intentionally a no-op. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6141
/*
 * net80211 set-channel hook; intentionally a no-op (channel changes
 * are handled through the firmware scan/PHY-context commands).
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6146
/*
 * net80211 per-channel scan hook; a no-op because scanning is
 * offloaded to the firmware (see iwm_scan_start()).
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6151
/*
 * net80211 minimum-dwell hook; a no-op because scanning is offloaded
 * to the firmware (see iwm_scan_start()).
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
        return;
}
6157
6158 void
6159 iwm_init_task(void *arg1)
6160 {
6161         struct iwm_softc *sc = arg1;
6162
6163         IWM_LOCK(sc);
6164         while (sc->sc_flags & IWM_FLAG_BUSY) {
6165 #if defined(__DragonFly__)
6166                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6167 #else
6168                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6169 #endif
6170 }
6171         sc->sc_flags |= IWM_FLAG_BUSY;
6172         iwm_stop(sc);
6173         if (sc->sc_ic.ic_nrunning > 0)
6174                 iwm_init(sc);
6175         sc->sc_flags &= ~IWM_FLAG_BUSY;
6176         wakeup(&sc->sc_flags);
6177         IWM_UNLOCK(sc);
6178 }
6179
6180 static int
6181 iwm_resume(device_t dev)
6182 {
6183         struct iwm_softc *sc = device_get_softc(dev);
6184         int do_reinit = 0;
6185         uint16_t reg;
6186
6187         /* Clear device-specific "PCI retry timeout" register (41h). */
6188         reg = pci_read_config(dev, 0x40, sizeof(reg));
6189         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6190         iwm_init_task(device_get_softc(dev));
6191
6192         IWM_LOCK(sc);
6193         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6194                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6195                 do_reinit = 1;
6196         }
6197         IWM_UNLOCK(sc);
6198
6199         if (do_reinit)
6200                 ieee80211_resume_all(&sc->sc_ic);
6201
6202         return 0;
6203 }
6204
6205 static int
6206 iwm_suspend(device_t dev)
6207 {
6208         int do_stop = 0;
6209         struct iwm_softc *sc = device_get_softc(dev);
6210
6211         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6212
6213         ieee80211_suspend_all(&sc->sc_ic);
6214
6215         if (do_stop) {
6216                 IWM_LOCK(sc);
6217                 iwm_stop(sc);
6218                 sc->sc_flags |= IWM_FLAG_SCANNING;
6219                 IWM_UNLOCK(sc);
6220         }
6221
6222         return (0);
6223 }
6224
/*
 * Common teardown used by both the detach method and the attach/
 * preinit failure paths.  'do_net80211' selects whether the net80211
 * layer was attached and must be detached too (true only after
 * iwm_preinit() has called ieee80211_ifattach()).
 *
 * Order matters: taskqueue and callouts first (no new work), then the
 * device, then net80211, then DMA resources, and the PCI resources
 * last.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (sc->sc_tq) {
#if defined(__DragonFly__)
                /* doesn't exist for DFly, DFly drains tasks on free */
#else
                taskqueue_drain_all(sc->sc_tq);
#endif
                taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
                sc->sc_tq = NULL;
#endif
        }
        callout_drain(&sc->sc_led_blink_to);
        callout_drain(&sc->sc_watchdog_to);
        iwm_stop_device(sc);
        if (do_net80211) {
                ieee80211_ifdetach(&sc->sc_ic);
        }

        iwm_phy_db_free(sc);

        /* Free descriptor rings */
        iwm_free_rx_ring(sc, &sc->rxq);
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* Free scheduler */
        iwm_dma_contig_free(&sc->sched_dma);
        iwm_dma_contig_free(&sc->ict_dma);
        iwm_dma_contig_free(&sc->kw_dma);
        iwm_dma_contig_free(&sc->fw_dma);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        mbufq_drain(&sc->sc_snd);
        IWM_LOCK_DESTROY(sc);

        return (0);
}
6275
6276 static int
6277 iwm_detach(device_t dev)
6278 {
6279         struct iwm_softc *sc = device_get_softc(dev);
6280
6281         return (iwm_detach_local(sc, 1));
6282 }
6283
/* newbus device methods for the iwm PCI driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus; requires the firmware, pci and wlan modules. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);