if_iwm - Fix iwm_poll_bit() error value check in iwm_attach().
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
154
155 #include <machine/endian.h>
156
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
159
160 #include <net/bpf.h>
161
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
168
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
173
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
178
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192
193 const uint8_t iwm_nvm_channels[] = {
194         /* 2.4 GHz */
195         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
196         /* 5 GHz */
197         36, 40, 44, 48, 52, 56, 60, 64,
198         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
199         149, 153, 157, 161, 165
200 };
201 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
202     "IWM_NUM_CHANNELS is too small");
203
204 const uint8_t iwm_nvm_channels_8000[] = {
205         /* 2.4 GHz */
206         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
207         /* 5 GHz */
208         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
209         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
210         149, 153, 157, 161, 165, 169, 173, 177, 181
211 };
212 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
213     "IWM_NUM_CHANNELS_8000 is too small");
214
215 #define IWM_NUM_2GHZ_CHANNELS   14
216 #define IWM_N_HW_ADDR_MASK      0xF
217
218 /*
219  * XXX For now, there's simply a fixed set of rate table entries
220  * that are populated.
221  */
222 const struct iwm_rate {
223         uint8_t rate;
224         uint8_t plcp;
225 } iwm_rates[] = {
226         {   2,  IWM_RATE_1M_PLCP  },
227         {   4,  IWM_RATE_2M_PLCP  },
228         {  11,  IWM_RATE_5M_PLCP  },
229         {  22,  IWM_RATE_11M_PLCP },
230         {  12,  IWM_RATE_6M_PLCP  },
231         {  18,  IWM_RATE_9M_PLCP  },
232         {  24,  IWM_RATE_12M_PLCP },
233         {  36,  IWM_RATE_18M_PLCP },
234         {  48,  IWM_RATE_24M_PLCP },
235         {  72,  IWM_RATE_36M_PLCP },
236         {  96,  IWM_RATE_48M_PLCP },
237         { 108,  IWM_RATE_54M_PLCP },
238 };
239 #define IWM_RIDX_CCK    0
240 #define IWM_RIDX_OFDM   4
241 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
242 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
243 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
244
/*
 * One NVM section image: a length-tagged byte buffer filled in by the
 * NVM read path and consumed by the NVM parsing code.
 */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* section contents */
};
249
250 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
251 static int      iwm_firmware_store_section(struct iwm_softc *,
252                                            enum iwm_ucode_type,
253                                            const uint8_t *, size_t);
254 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
255 static void     iwm_fw_info_free(struct iwm_fw_info *);
256 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
257 #if !defined(__DragonFly__)
258 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
259 #endif
260 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
261                                      bus_size_t, bus_size_t);
262 static void     iwm_dma_contig_free(struct iwm_dma_info *);
263 static int      iwm_alloc_fwmem(struct iwm_softc *);
264 static void     iwm_free_fwmem(struct iwm_softc *);
265 static int      iwm_alloc_sched(struct iwm_softc *);
266 static void     iwm_free_sched(struct iwm_softc *);
267 static int      iwm_alloc_kw(struct iwm_softc *);
268 static void     iwm_free_kw(struct iwm_softc *);
269 static int      iwm_alloc_ict(struct iwm_softc *);
270 static void     iwm_free_ict(struct iwm_softc *);
271 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 static void     iwm_disable_rx_dma(struct iwm_softc *);
273 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
275 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
276                                   int);
277 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
279 static void     iwm_enable_interrupts(struct iwm_softc *);
280 static void     iwm_restore_interrupts(struct iwm_softc *);
281 static void     iwm_disable_interrupts(struct iwm_softc *);
282 static void     iwm_ict_reset(struct iwm_softc *);
283 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
284 static void     iwm_stop_device(struct iwm_softc *);
285 static void     iwm_mvm_nic_config(struct iwm_softc *);
286 static int      iwm_nic_rx_init(struct iwm_softc *);
287 static int      iwm_nic_tx_init(struct iwm_softc *);
288 static int      iwm_nic_init(struct iwm_softc *);
289 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
290 static int      iwm_post_alive(struct iwm_softc *);
291 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
292                                    uint16_t, uint8_t *, uint16_t *);
293 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
294                                      uint16_t *, size_t);
295 static uint32_t iwm_eeprom_channel_flags(uint16_t);
296 static void     iwm_add_channel_band(struct iwm_softc *,
297                     struct ieee80211_channel[], int, int *, int, size_t,
298                     const uint8_t[]);
299 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
300                     struct ieee80211_channel[]);
301 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302                                    const uint16_t *, const uint16_t *,
303                                    const uint16_t *, const uint16_t *,
304                                    const uint16_t *);
305 static void     iwm_set_hw_address_8000(struct iwm_softc *,
306                                         struct iwm_nvm_data *,
307                                         const uint16_t *, const uint16_t *);
308 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
309                             const uint16_t *);
310 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
311 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
312                                   const uint16_t *);
313 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
314                                    const uint16_t *);
315 static void     iwm_set_radio_cfg(const struct iwm_softc *,
316                                   struct iwm_nvm_data *, uint32_t);
317 static int      iwm_parse_nvm_sections(struct iwm_softc *,
318                                        struct iwm_nvm_section *);
319 static int      iwm_nvm_init(struct iwm_softc *);
320 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
321                                        const uint8_t *, uint32_t);
322 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
323                                         const uint8_t *, uint32_t);
324 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
325 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
326                                            struct iwm_fw_sects *, int , int *);
327 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
328 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
329 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
330 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
331 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
332 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
333                                               enum iwm_ucode_type);
334 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
335 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
336 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
337 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
338                                             struct iwm_rx_phy_info *);
339 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
340                                       struct iwm_rx_packet *,
341                                       struct iwm_rx_data *);
342 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
343 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
344                                    struct iwm_rx_data *);
345 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
346                                          struct iwm_rx_packet *,
347                                          struct iwm_node *);
348 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
349                                   struct iwm_rx_data *);
350 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
351 #if 0
352 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
353                                  uint16_t);
354 #endif
355 static const struct iwm_rate *
356         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
357                         struct ieee80211_frame *, struct iwm_tx_cmd *);
358 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
359                        struct ieee80211_node *, int);
360 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
361                              const struct ieee80211_bpf_params *);
362 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
363                                                 struct iwm_mvm_add_sta_cmd_v7 *,
364                                                 int *);
365 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
366                                        int);
367 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
368 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
369 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
370                                            struct iwm_int_sta *,
371                                            const uint8_t *, uint16_t, uint16_t);
372 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
373 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
374 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
375 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
376 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
377 static struct ieee80211_node *
378                 iwm_node_alloc(struct ieee80211vap *,
379                                const uint8_t[IEEE80211_ADDR_LEN]);
380 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
381 static int      iwm_media_change(struct ifnet *);
382 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
383 static void     iwm_endscan_cb(void *, int);
384 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
385                                         struct iwm_sf_cfg_cmd *,
386                                         struct ieee80211_node *);
387 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
388 static int      iwm_send_bt_init_conf(struct iwm_softc *);
389 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
390 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
391 static int      iwm_init_hw(struct iwm_softc *);
392 static void     iwm_init(struct iwm_softc *);
393 static void     iwm_start(struct iwm_softc *);
394 static void     iwm_stop(struct iwm_softc *);
395 static void     iwm_watchdog(void *);
396 static void     iwm_parent(struct ieee80211com *);
397 #ifdef IWM_DEBUG
398 static const char *
399                 iwm_desc_lookup(uint32_t);
400 static void     iwm_nic_error(struct iwm_softc *);
401 static void     iwm_nic_umac_error(struct iwm_softc *);
402 #endif
403 static void     iwm_notif_intr(struct iwm_softc *);
404 static void     iwm_intr(void *);
405 static int      iwm_attach(device_t);
406 static int      iwm_is_valid_ether_addr(uint8_t *);
407 static void     iwm_preinit(void *);
408 static int      iwm_detach_local(struct iwm_softc *sc, int);
409 static void     iwm_init_task(void *);
410 static void     iwm_radiotap_attach(struct iwm_softc *);
411 static struct ieee80211vap *
412                 iwm_vap_create(struct ieee80211com *,
413                                const char [IFNAMSIZ], int,
414                                enum ieee80211_opmode, int,
415                                const uint8_t [IEEE80211_ADDR_LEN],
416                                const uint8_t [IEEE80211_ADDR_LEN]);
417 static void     iwm_vap_delete(struct ieee80211vap *);
418 static void     iwm_scan_start(struct ieee80211com *);
419 static void     iwm_scan_end(struct ieee80211com *);
420 static void     iwm_update_mcast(struct ieee80211com *);
421 static void     iwm_set_channel(struct ieee80211com *);
422 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
423 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
424 static int      iwm_detach(device_t);
425
426 #if defined(__DragonFly__)
427 static int      iwm_msi_enable = 1;
428
429 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
430
431 #endif
432
433 /*
434  * Firmware parser.
435  */
436
437 static int
438 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
439 {
440         const struct iwm_fw_cscheme_list *l = (const void *)data;
441
442         if (dlen < sizeof(*l) ||
443             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
444                 return EINVAL;
445
446         /* we don't actually store anything for now, always use s/w crypto */
447
448         return 0;
449 }
450
451 static int
452 iwm_firmware_store_section(struct iwm_softc *sc,
453     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
454 {
455         struct iwm_fw_sects *fws;
456         struct iwm_fw_onesect *fwone;
457
458         if (type >= IWM_UCODE_TYPE_MAX)
459                 return EINVAL;
460         if (dlen < sizeof(uint32_t))
461                 return EINVAL;
462
463         fws = &sc->sc_fw.fw_sects[type];
464         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
465                 return EINVAL;
466
467         fwone = &fws->fw_sect[fws->fw_count];
468
469         /* first 32bit are device load offset */
470         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
471
472         /* rest is data */
473         fwone->fws_data = data + sizeof(uint32_t);
474         fwone->fws_len = dlen - sizeof(uint32_t);
475
476         fws->fw_count++;
477         fws->fw_totlen += fwone->fws_len;
478
479         return 0;
480 }
481
482 struct iwm_tlv_calib_data {
483         uint32_t ucode_type;
484         struct iwm_tlv_calib_ctrl calib;
485 } __packed;
486
487 static int
488 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
489 {
490         const struct iwm_tlv_calib_data *def_calib = data;
491         uint32_t ucode_type = le32toh(def_calib->ucode_type);
492
493         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
494                 device_printf(sc->sc_dev,
495                     "Wrong ucode_type %u for default "
496                     "calibration.\n", ucode_type);
497                 return EINVAL;
498         }
499
500         sc->sc_default_calib[ucode_type].flow_trigger =
501             def_calib->calib.flow_trigger;
502         sc->sc_default_calib[ucode_type].event_trigger =
503             def_calib->calib.event_trigger;
504
505         return 0;
506 }
507
508 static void
509 iwm_fw_info_free(struct iwm_fw_info *fw)
510 {
511         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
512         fw->fw_fp = NULL;
513         /* don't touch fw->fw_status */
514         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
515 }
516
517 static int
518 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
519 {
520         struct iwm_fw_info *fw = &sc->sc_fw;
521         const struct iwm_tlv_ucode_header *uhdr;
522         struct iwm_ucode_tlv tlv;
523         enum iwm_ucode_tlv_type tlv_type;
524         const struct firmware *fwp;
525         const uint8_t *data;
526         int error = 0;
527         size_t len;
528
529         if (fw->fw_status == IWM_FW_STATUS_DONE &&
530             ucode_type != IWM_UCODE_TYPE_INIT)
531                 return 0;
532
533         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
534 #if defined(__DragonFly__)
535                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
536 #else
537                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
538 #endif
539         }
540         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
541
542         if (fw->fw_fp != NULL)
543                 iwm_fw_info_free(fw);
544
545         /*
546          * Load firmware into driver memory.
547          * fw_fp will be set.
548          */
549         IWM_UNLOCK(sc);
550         fwp = firmware_get(sc->sc_fwname);
551         IWM_LOCK(sc);
552         if (fwp == NULL) {
553                 device_printf(sc->sc_dev,
554                     "could not read firmware %s (error %d)\n",
555                     sc->sc_fwname, error);
556                 goto out;
557         }
558         fw->fw_fp = fwp;
559
560         /* (Re-)Initialize default values. */
561         sc->sc_capaflags = 0;
562         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
563         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
564         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
565
566         /*
567          * Parse firmware contents
568          */
569
570         uhdr = (const void *)fw->fw_fp->data;
571         if (*(const uint32_t *)fw->fw_fp->data != 0
572             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
573                 device_printf(sc->sc_dev, "invalid firmware %s\n",
574                     sc->sc_fwname);
575                 error = EINVAL;
576                 goto out;
577         }
578
579         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
580             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
581             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
582             IWM_UCODE_API(le32toh(uhdr->ver)));
583         data = uhdr->data;
584         len = fw->fw_fp->datasize - sizeof(*uhdr);
585
586         while (len >= sizeof(tlv)) {
587                 size_t tlv_len;
588                 const void *tlv_data;
589
590                 memcpy(&tlv, data, sizeof(tlv));
591                 tlv_len = le32toh(tlv.length);
592                 tlv_type = le32toh(tlv.type);
593
594                 len -= sizeof(tlv);
595                 data += sizeof(tlv);
596                 tlv_data = data;
597
598                 if (len < tlv_len) {
599                         device_printf(sc->sc_dev,
600                             "firmware too short: %zu bytes\n",
601                             len);
602                         error = EINVAL;
603                         goto parse_out;
604                 }
605
606                 switch ((int)tlv_type) {
607                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
608                         if (tlv_len < sizeof(uint32_t)) {
609                                 device_printf(sc->sc_dev,
610                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
611                                     __func__,
612                                     (int) tlv_len);
613                                 error = EINVAL;
614                                 goto parse_out;
615                         }
616                         sc->sc_capa_max_probe_len
617                             = le32toh(*(const uint32_t *)tlv_data);
618                         /* limit it to something sensible */
619                         if (sc->sc_capa_max_probe_len >
620                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
621                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
622                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
623                                     "ridiculous\n", __func__);
624                                 error = EINVAL;
625                                 goto parse_out;
626                         }
627                         break;
628                 case IWM_UCODE_TLV_PAN:
629                         if (tlv_len) {
630                                 device_printf(sc->sc_dev,
631                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
632                                     __func__,
633                                     (int) tlv_len);
634                                 error = EINVAL;
635                                 goto parse_out;
636                         }
637                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
638                         break;
639                 case IWM_UCODE_TLV_FLAGS:
640                         if (tlv_len < sizeof(uint32_t)) {
641                                 device_printf(sc->sc_dev,
642                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
643                                     __func__,
644                                     (int) tlv_len);
645                                 error = EINVAL;
646                                 goto parse_out;
647                         }
648                         /*
649                          * Apparently there can be many flags, but Linux driver
650                          * parses only the first one, and so do we.
651                          *
652                          * XXX: why does this override IWM_UCODE_TLV_PAN?
653                          * Intentional or a bug?  Observations from
654                          * current firmware file:
655                          *  1) TLV_PAN is parsed first
656                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
657                          * ==> this resets TLV_PAN to itself... hnnnk
658                          */
659                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
660                         break;
661                 case IWM_UCODE_TLV_CSCHEME:
662                         if ((error = iwm_store_cscheme(sc,
663                             tlv_data, tlv_len)) != 0) {
664                                 device_printf(sc->sc_dev,
665                                     "%s: iwm_store_cscheme(): returned %d\n",
666                                     __func__,
667                                     error);
668                                 goto parse_out;
669                         }
670                         break;
671                 case IWM_UCODE_TLV_NUM_OF_CPU: {
672                         uint32_t num_cpu;
673                         if (tlv_len != sizeof(uint32_t)) {
674                                 device_printf(sc->sc_dev,
675                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
676                                     __func__,
677                                     (int) tlv_len);
678                                 error = EINVAL;
679                                 goto parse_out;
680                         }
681                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
682                         if (num_cpu < 1 || num_cpu > 2) {
683                                 device_printf(sc->sc_dev,
684                                     "%s: Driver supports only 1 or 2 CPUs\n",
685                                     __func__);
686                                 error = EINVAL;
687                                 goto parse_out;
688                         }
689                         break;
690                 }
691                 case IWM_UCODE_TLV_SEC_RT:
692                         if ((error = iwm_firmware_store_section(sc,
693                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_SEC_INIT:
702                         if ((error = iwm_firmware_store_section(sc,
703                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
706                                     __func__,
707                                     error);
708                                 goto parse_out;
709                         }
710                         break;
711                 case IWM_UCODE_TLV_SEC_WOWLAN:
712                         if ((error = iwm_firmware_store_section(sc,
713                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
714                                 device_printf(sc->sc_dev,
715                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
716                                     __func__,
717                                     error);
718                                 goto parse_out;
719                         }
720                         break;
721                 case IWM_UCODE_TLV_DEF_CALIB:
722                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
723                                 device_printf(sc->sc_dev,
724                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
725                                     __func__,
726                                     (int) tlv_len,
727                                     (int) sizeof(struct iwm_tlv_calib_data));
728                                 error = EINVAL;
729                                 goto parse_out;
730                         }
731                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
732                                 device_printf(sc->sc_dev,
733                                     "%s: iwm_set_default_calib() failed: %d\n",
734                                     __func__,
735                                     error);
736                                 goto parse_out;
737                         }
738                         break;
739                 case IWM_UCODE_TLV_PHY_SKU:
740                         if (tlv_len != sizeof(uint32_t)) {
741                                 error = EINVAL;
742                                 device_printf(sc->sc_dev,
743                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
744                                     __func__,
745                                     (int) tlv_len);
746                                 goto parse_out;
747                         }
748                         sc->sc_fw_phy_config =
749                             le32toh(*(const uint32_t *)tlv_data);
750                         break;
751
752                 case IWM_UCODE_TLV_API_CHANGES_SET: {
753                         const struct iwm_ucode_api *api;
754                         if (tlv_len != sizeof(*api)) {
755                                 error = EINVAL;
756                                 goto parse_out;
757                         }
758                         api = (const struct iwm_ucode_api *)tlv_data;
759                         /* Flags may exceed 32 bits in future firmware. */
760                         if (le32toh(api->api_index) > 0) {
761                                 device_printf(sc->sc_dev,
762                                     "unsupported API index %d\n",
763                                     le32toh(api->api_index));
764                                 goto parse_out;
765                         }
766                         sc->sc_ucode_api = le32toh(api->api_flags);
767                         break;
768                 }
769
770                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
771                         const struct iwm_ucode_capa *capa;
772                         int idx, i;
773                         if (tlv_len != sizeof(*capa)) {
774                                 error = EINVAL;
775                                 goto parse_out;
776                         }
777                         capa = (const struct iwm_ucode_capa *)tlv_data;
778                         idx = le32toh(capa->api_index);
779                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
780                                 device_printf(sc->sc_dev,
781                                     "unsupported API index %d\n", idx);
782                                 goto parse_out;
783                         }
784                         for (i = 0; i < 32; i++) {
785                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
786                                         continue;
787                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
788                         }
789                         break;
790                 }
791
792                 case 48: /* undocumented TLV */
793                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
794                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
795                         /* ignore, not used by current driver */
796                         break;
797
798                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
799                         if ((error = iwm_firmware_store_section(sc,
800                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
801                             tlv_len)) != 0)
802                                 goto parse_out;
803                         break;
804
805                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
806                         if (tlv_len != sizeof(uint32_t)) {
807                                 error = EINVAL;
808                                 goto parse_out;
809                         }
810                         sc->sc_capa_n_scan_channels =
811                           le32toh(*(const uint32_t *)tlv_data);
812                         break;
813
814                 case IWM_UCODE_TLV_FW_VERSION:
815                         if (tlv_len != sizeof(uint32_t) * 3) {
816                                 error = EINVAL;
817                                 goto parse_out;
818                         }
819                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
820                             "%d.%d.%d",
821                             le32toh(((const uint32_t *)tlv_data)[0]),
822                             le32toh(((const uint32_t *)tlv_data)[1]),
823                             le32toh(((const uint32_t *)tlv_data)[2]));
824                         break;
825
826                 default:
827                         device_printf(sc->sc_dev,
828                             "%s: unknown firmware section %d, abort\n",
829                             __func__, tlv_type);
830                         error = EINVAL;
831                         goto parse_out;
832                 }
833
834                 len -= roundup(tlv_len, 4);
835                 data += roundup(tlv_len, 4);
836         }
837
838         KASSERT(error == 0, ("unhandled error"));
839
840  parse_out:
841         if (error) {
842                 device_printf(sc->sc_dev, "firmware parse error %d, "
843                     "section type %d\n", error, tlv_type);
844         }
845
846         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
847                 device_printf(sc->sc_dev,
848                     "device uses unsupported power ops\n");
849                 error = ENOTSUP;
850         }
851
852  out:
853         if (error) {
854                 fw->fw_status = IWM_FW_STATUS_NONE;
855                 if (fw->fw_fp != NULL)
856                         iwm_fw_info_free(fw);
857         } else
858                 fw->fw_status = IWM_FW_STATUS_DONE;
859         wakeup(&sc->sc_fw);
860
861         return error;
862 }
863
864 /*
865  * DMA resource routines
866  */
867
868 #if !defined(__DragonFly__)
869 static void
870 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
871 {
872         if (error != 0)
873                 return;
874         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
875         *(bus_addr_t *)arg = segs[0].ds_addr;
876 }
877 #endif
878
/*
 * Allocate a physically contiguous, coherent DMA area of 'size' bytes with
 * the requested 'alignment' and fill in dma->{tag,map,vaddr,paddr}.
 * Returns 0 on success or a bus_dma error code; on failure the partially
 * initialized state is torn down via iwm_dma_contig_free().
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Start from a clean state so a failure can be unwound safely. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	/* DragonFly: a single call allocates, maps and loads the memory. */
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	/* FreeBSD: create a tag, allocate, then load to learn the busaddr. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Free the memory here; 'fail' only destroys the tag. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
933
/*
 * Release a DMA area set up by iwm_dma_contig_alloc().  Safe to call on a
 * partially initialized 'dma' (NULL vaddr and/or tag are skipped).
 * Order matters: sync and unload before freeing, free before tag destroy.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
949
950 /* fwmem is used to load firmware onto the card */
951 static int
952 iwm_alloc_fwmem(struct iwm_softc *sc)
953 {
954         /* Must be aligned on a 16-byte boundary. */
955         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
956             sc->sc_fwdmasegsz, 16);
957 }
958
959 static void
960 iwm_free_fwmem(struct iwm_softc *sc)
961 {
962         iwm_dma_contig_free(&sc->fw_dma);
963 }
964
965 /* tx scheduler rings.  not used? */
966 static int
967 iwm_alloc_sched(struct iwm_softc *sc)
968 {
969         /* TX scheduler rings must be aligned on a 1KB boundary. */
970         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
971             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
972 }
973
974 static void
975 iwm_free_sched(struct iwm_softc *sc)
976 {
977         iwm_dma_contig_free(&sc->sched_dma);
978 }
979
980 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
981 static int
982 iwm_alloc_kw(struct iwm_softc *sc)
983 {
984         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
985 }
986
987 static void
988 iwm_free_kw(struct iwm_softc *sc)
989 {
990         iwm_dma_contig_free(&sc->kw_dma);
991 }
992
993 /* interrupt cause table */
994 static int
995 iwm_alloc_ict(struct iwm_softc *sc)
996 {
997         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
998             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
999 }
1000
1001 static void
1002 iwm_free_ict(struct iwm_softc *sc)
1003 {
1004         iwm_dma_contig_free(&sc->ict_dma);
1005 }
1006
/*
 * Allocate the RX descriptor ring, the RX status area, the per-buffer DMA
 * tag and maps (plus a spare map for iwm_rx_addbuf()), and fill every ring
 * slot with a receive buffer.  On any failure the whole ring is freed and
 * the bus_dma error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach an mbuf to this slot and hand it to the hardware. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1087
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (!iwm_nic_lock(sc))
		return;

	/* XXX handle if RX stop doesn't finish? */
	(void) iwm_pcie_rx_stop(sc);
	iwm_nic_unlock(sc);
}
1099
1100 static void
1101 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1102 {
1103         /* Reset the ring state */
1104         ring->cur = 0;
1105
1106         /*
1107          * The hw rx ring index in shared memory must also be cleared,
1108          * otherwise the discrepancy can cause reprocessing chaos.
1109          */
1110         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1111 }
1112
/*
 * Tear down an RX ring: descriptor/status DMA areas, all buffer mbufs and
 * maps, the spare map, and finally the buffer DMA tag.  Safe to call on a
 * partially constructed ring (used as the failure path of the allocator).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync and unload before freeing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	/* All maps must be destroyed before the tag. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1145
/*
 * Allocate the TX descriptor ring for queue 'qid'.  For the command queue
 * and lower-numbered queues, also allocate the device-command area and a
 * per-slot DMA map, precomputing each slot's command/scratch bus address.
 * Returns 0 on success or a bus_dma error (the ring is freed on error).
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Walk the command area, recording each slot's bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk above must end exactly at the end of the command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1235
/*
 * Drop every queued frame on 'ring', clear its descriptors and reset the
 * software indices.  The ring's DMA resources remain allocated.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be the reason the interface is full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1260
/*
 * Tear down a TX ring: descriptor and command DMA areas, every queued
 * mbuf and its map, and finally the buffer DMA tag.  Safe on a partially
 * constructed ring (used as the failure path of the allocator).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync and unload before freeing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	/* All maps must be destroyed before the tag. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1289
1290 /*
1291  * High-level hardware frobbing routines
1292  */
1293
/* Unmask the default interrupt set and remember it for later restore. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1300
/* Re-apply the interrupt mask last recorded in sc->sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1306
/* Mask all interrupt sources and ack anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1317
/*
 * Zero the interrupt cause table, point the device at it and switch the
 * driver into ICT interrupt mode, with interrupts disabled across the
 * transition and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1341
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1436
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash taken from the
 * hardware revision and the radio type/step/dash taken from the firmware
 * PHY SKU (sc->sc_fw_phy_config).
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1478
/*
 * Program the RX DMA engine: clear the status area, stop RX DMA, point
 * the hardware at the ring and status bus addresses, and enable RX with
 * the desired config bits.  Requires (and releases) the NIC lock;
 * returns EBUSY if the lock cannot be taken, otherwise 0.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1538
/*
 * Program the TX DMA engine: disable the scheduler, install the
 * "keep warm" page address and the per-queue descriptor ring base
 * addresses, then put the scheduler into auto-active mode.
 *
 * Returns 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1573
/*
 * Full NIC bring-up: APM init, power settings (family 7000 only),
 * MAC/radio configuration, then RX and TX DMA initialization.
 * Finally enable shadow registers so some CSR accesses do not
 * require the NIC to be awake.
 *
 * Returns 0 on success or the error from the RX/TX init steps.
 * The initialization order mirrors the Linux iwlwifi driver.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1600
/*
 * Access-category to TX FIFO mapping (index 0 = VO ... index 3 = BK).
 * NOTE(review): this order matches Linux mac80211 AC numbering
 * (VO=0), not net80211's WME_AC_* numbering (BE=0) — verify that
 * callers index it accordingly.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1607
/*
 * Activate TX queue 'qid' and bind it to TX FIFO 'fifo'.  The command
 * queue is configured directly through the scheduler registers; every
 * other queue is configured by sending an IWM_SCD_QUEUE_CFG command to
 * the firmware on behalf of station 'sta_id'.
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be obtained,
 * or the error from iwm_mvm_send_cmd_pdu().
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Activate the queue and attach it to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		/*
		 * Firmware commands may sleep; drop the NIC lock while the
		 * SCD_QUEUE_CFG command is in flight and re-take it below.
		 */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs in the queue *number*.  If
	 * IWM_SCD_EN_CTRL is a per-queue bitmask (as SCD_EN_CTRL is in
	 * Linux iwlwifi), this should be (1 << qid) — verify against
	 * the upstream driver before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1682
/*
 * Finish bring-up after the firmware reports ALIVE: verify the
 * scheduler SRAM base, reset the ICT interrupt table, clear the TX
 * scheduler state, enable the command queue and all FH TX DMA
 * channels.
 *
 * Locking: the NIC lock is taken twice; it is dropped around
 * iwm_enable_txq() because that path sends a (sleeping) firmware
 * command on non-command queues.  All exit paths leave the lock
 * released.
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be taken, or
 * the error from iwm_write_mem()/iwm_enable_txq().
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Sanity-check the scheduler base address reported by ALIVE. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->sched_base, base);
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* Drop the lock: iwm_enable_txq() manages it internally. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

 out:
	iwm_nic_unlock(sc);
	return error;
}
1749
1750 /*
1751  * NVM read access and content parsing.  We do not support
1752  * external NVM or writing NVM.
1753  * iwlwifi/mvm/nvm.c
1754  */
1755
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
/* Upper bound (bytes) on a fully assembled NVM section. */
#define IWM_MAX_NVM_SECTION_SIZE        8192

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
#define IWM_READ_NVM_CHUNK_SUCCEED              0
#define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1778
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD
 * firmware command.  On success the payload is copied into 'data'
 * and *len is set to the number of bytes received (which may be
 * shorter than 'length' when the section is exhausted).
 *
 * Returns 0 on success, EIO if the firmware rejected the command, or
 * EINVAL for an inconsistent response.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	/*
	 * NOTE(review): the caller-supplied offset is discarded here, so
	 * every chunk is requested at offset 0 and copied to the start
	 * of 'data'.  If this is a workaround for firmware that rejects
	 * non-zero offsets it deserves an explanation; otherwise
	 * multi-chunk reads in iwm_nvm_read_section() look broken —
	 * confirm against the upstream iwlwifi/OpenBSD NVM read path.
	 */
	offset = 0;
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
		    IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, offset_read;
	size_t bytes_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		device_printf(sc->sc_dev,
		    "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
		    pkt->hdr.flags);
		ret = EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;

	/* All response fields are little-endian 16-bit. */
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "NVM access command failed with status %d\n", ret);
		ret = EINVAL;
		goto exit;
	}

	/* The device must echo back the offset we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	/* Never copy more than the caller asked for. */
	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %zd bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	/* Always release the response buffer from IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &cmd);
	return ret;
}
1858
1859 /*
1860  * Reads an NVM section completely.
1861  * NICs prior to 7000 family don't have a real NVM, but just read
1862  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1863  * by uCode, we need to manually check in this case that we don't
1864  * overflow and try to read more than the EEPROM size.
1865  * For 7000 family NICs, we supply the maximal size we can read, and
1866  * the uCode fills the response with as much data as we can,
1867  * without overflowing, so no check is needed.
1868  */
1869 static int
1870 iwm_nvm_read_section(struct iwm_softc *sc,
1871         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1872 {
1873         uint16_t chunklen, seglen;
1874         int error = 0;
1875
1876         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1877             "reading NVM section %d\n", section);
1878
1879         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1880         *len = 0;
1881
1882         /* Read NVM chunks until exhausted (reading less than requested) */
1883         while (seglen == chunklen && *len < max_len) {
1884                 error = iwm_nvm_read_chunk(sc,
1885                     section, *len, chunklen, data, &seglen);
1886                 if (error) {
1887                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1888                             "Cannot read from NVM section "
1889                             "%d at offset %d\n", section, *len);
1890                         return error;
1891                 }
1892                 *len += seglen;
1893         }
1894
1895         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1896             "NVM section %d read completed (%d bytes, error=%d)\n",
1897             section, *len, error);
1898         return error;
1899 }
1900
/*
 * NVM offsets (in words) definitions.
 * Section-start values (IWM_NVM_SW_SECTION, IWM_NVM_CALIB_SECTION) are
 * absolute word offsets; the small values (IWM_NVM_VERSION etc.) are
 * relative to the start of their section, as the subtraction in
 * IWM_NVM_CHANNELS/IWM_XTAL_CALIB shows.
 */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1918
/*
 * Family-8000 NVM word offsets.  As with enum iwm_nvm_offsets, the
 * small values are relative to the start of their section.
 */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1944
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),	/* 2.4 GHz band enabled */
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),	/* 5.2 GHz band enabled */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),	/* 802.11n supported */
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),	/* 802.11ac supported */
};
1952
/*
 * radio config bits (actual values from NVM definition)
 *
 * Macro arguments are fully parenthesized so that callers may pass
 * arbitrary expressions without operator-precedence surprises
 * (e.g. IWM_NVM_RF_CFG_DASH_MSK(a | b)).
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)  /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)  /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)  /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)  /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */

/* Family-8000 packing of the same information. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		(((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		(((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		(((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	(((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	(((x) >> 28) & 0xF)

/* Fallback TX power (dBm) when the NVM provides no limit. */
#define DEFAULT_MAX_TX_POWER 16
1969
1970 /**
1971  * enum iwm_nvm_channel_flags - channel flags in NVM
1972  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1973  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1974  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1975  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1976  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1977  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1978  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1979  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1980  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1981  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1982  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* note: bit 2 and bits 5-6 carry no meaning in this layout */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1994
1995 /*
1996  * Translate EEPROM flags to net80211.
1997  */
1998 static uint32_t
1999 iwm_eeprom_channel_flags(uint16_t ch_flags)
2000 {
2001         uint32_t nflags;
2002
2003         nflags = 0;
2004         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2005                 nflags |= IEEE80211_CHAN_PASSIVE;
2006         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2007                 nflags |= IEEE80211_CHAN_NOADHOC;
2008         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2009                 nflags |= IEEE80211_CHAN_DFS;
2010                 /* Just in case. */
2011                 nflags |= IEEE80211_CHAN_NOADHOC;
2012         }
2013
2014         return (nflags);
2015 }
2016
2017 static void
2018 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2019     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2020     const uint8_t bands[])
2021 {
2022         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
2023         uint32_t nflags;
2024         uint16_t ch_flags;
2025         uint8_t ieee;
2026         int error;
2027
2028         for (; ch_idx < ch_num; ch_idx++) {
2029                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2030                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2031                         ieee = iwm_nvm_channels[ch_idx];
2032                 else
2033                         ieee = iwm_nvm_channels_8000[ch_idx];
2034
2035                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2036                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2037                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2038                             ieee, ch_flags,
2039                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2040                             "5.2" : "2.4");
2041                         continue;
2042                 }
2043
2044                 nflags = iwm_eeprom_channel_flags(ch_flags);
2045                 error = ieee80211_add_channel(chans, maxchans, nchans,
2046                     ieee, 0, 0, nflags, bands);
2047                 if (error != 0)
2048                         break;
2049
2050                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2051                     "Ch. %d Flags %x [%sGHz] - Added\n",
2052                     ieee, ch_flags,
2053                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2054                     "5.2" : "2.4");
2055         }
2056 }
2057
2058 static void
2059 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2060     struct ieee80211_channel chans[])
2061 {
2062         struct iwm_softc *sc = ic->ic_softc;
2063         struct iwm_nvm_data *data = &sc->sc_nvm;
2064         uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2065         size_t ch_num;
2066
2067         memset(bands, 0, sizeof(bands));
2068         /* 1-13: 11b/g channels. */
2069         setbit(bands, IEEE80211_MODE_11B);
2070         setbit(bands, IEEE80211_MODE_11G);
2071         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2072             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2073
2074         /* 14: 11b channel only. */
2075         clrbit(bands, IEEE80211_MODE_11G);
2076         iwm_add_channel_band(sc, chans, maxchans, nchans,
2077             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2078
2079         if (data->sku_cap_band_52GHz_enable) {
2080                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2081                         ch_num = nitems(iwm_nvm_channels);
2082                 else
2083                         ch_num = nitems(iwm_nvm_channels_8000);
2084                 memset(bands, 0, sizeof(bands));
2085                 setbit(bands, IEEE80211_MODE_11A);
2086                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2087                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2088         }
2089 }
2090
/*
 * Determine the MAC address on family-8000 parts: prefer the NVM
 * MAC-override section; fall back to the WFMP PRPH registers.  If
 * neither yields an address, data->hw_addr is zeroed.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Reassemble: register words hold the bytes reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2149
2150 static int
2151 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2152             const uint16_t *phy_sku)
2153 {
2154         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2155                 return le16_to_cpup(nvm_sw + IWM_SKU);
2156
2157         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2158 }
2159
2160 static int
2161 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2162 {
2163         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2164                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2165         else
2166                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2167                                                 IWM_NVM_VERSION_8000));
2168 }
2169
2170 static int
2171 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2172                   const uint16_t *phy_sku)
2173 {
2174         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2175                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2176
2177         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2178 }
2179
2180 static int
2181 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2182 {
2183         int n_hw_addr;
2184
2185         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2186                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2187
2188         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2189
2190         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2191 }
2192
2193 static void
2194 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2195                   uint32_t radio_cfg)
2196 {
2197         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2198                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2199                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2200                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2201                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2202                 return;
2203         }
2204
2205         /* set the radio configuration for family 8000 */
2206         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2207         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2208         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2209         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2210         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2211         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2212 }
2213
/*
 * Populate sc->sc_nvm from the raw NVM section pointers: version,
 * radio configuration, SKU capabilities, hardware address count, MAC
 * address and channel flag table.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint32_t sku, radio_cfg;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of the NVM SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
		/* Swap the bytes within each 16-bit word. */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
	}

	/* Channel flags come from different sections per family. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2264
2265 static int
2266 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2267 {
2268         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2269
2270         /* Checking for required sections */
2271         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2272                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2273                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2274                         device_printf(sc->sc_dev,
2275                             "Can't parse empty OTP/NVM sections\n");
2276                         return ENOENT;
2277                 }
2278
2279                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2280         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2281                 /* SW and REGULATORY sections are mandatory */
2282                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2283                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2284                         device_printf(sc->sc_dev,
2285                             "Can't parse empty OTP/NVM sections\n");
2286                         return ENOENT;
2287                 }
2288                 /* MAC_OVERRIDE or at least HW section must exist */
2289                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2290                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2291                         device_printf(sc->sc_dev,
2292                             "Can't parse mac_address, empty sections\n");
2293                         return ENOENT;
2294                 }
2295
2296                 /* PHY_SKU section is mandatory in B0 */
2297                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2298                         device_printf(sc->sc_dev,
2299                             "Can't parse phy_sku in B0, empty sections\n");
2300                         return ENOENT;
2301                 }
2302
2303                 hw = (const uint16_t *)
2304                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2305         } else {
2306                 panic("unknown device family %d\n", sc->sc_device_family);
2307         }
2308
2309         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2310         calib = (const uint16_t *)
2311             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2312         regulatory = (const uint16_t *)
2313             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2314         mac_override = (const uint16_t *)
2315             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2316         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2317
2318         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2319             phy_sku, regulatory);
2320 }
2321
2322 static int
2323 iwm_nvm_init(struct iwm_softc *sc)
2324 {
2325         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2326         int i, section, error;
2327         uint16_t len;
2328         uint8_t *buf;
2329         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2330
2331         memset(nvm_sections, 0 , sizeof(nvm_sections));
2332
2333         buf = kmalloc(bufsz, M_DEVBUF, M_INTWAIT);
2334         if (buf == NULL)
2335                 return ENOMEM;
2336
2337         for (i = 0; i < nitems(nvm_to_read); i++) {
2338                 section = nvm_to_read[i];
2339                 KKASSERT(section <= nitems(nvm_sections));
2340
2341                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2342                 if (error) {
2343                         error = 0;
2344                         continue;
2345                 }
2346                 nvm_sections[section].data = kmalloc(len, M_DEVBUF, M_INTWAIT);
2347                 if (nvm_sections[section].data == NULL) {
2348                         error = ENOMEM;
2349                         break;
2350                 }
2351                 memcpy(nvm_sections[section].data, buf, len);
2352                 nvm_sections[section].length = len;
2353         }
2354         kfree(buf, M_DEVBUF);
2355         if (error == 0)
2356                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2357
2358         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2359                 if (nvm_sections[i].data != NULL)
2360                         kfree(nvm_sections[i].data, M_DEVBUF);
2361         }
2362
2363         return error;
2364 }
2365
2366 /*
2367  * Firmware loading gunk.  This is kind of a weird hybrid between the
2368  * iwn driver and the Linux iwlwifi driver.
2369  */
2370
2371 static int
2372 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2373         const uint8_t *section, uint32_t byte_cnt)
2374 {
2375         int error = EINVAL;
2376         uint32_t chunk_sz, offset;
2377
2378         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2379
2380         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2381                 uint32_t addr, len;
2382                 const uint8_t *data;
2383
2384                 addr = dst_addr + offset;
2385                 len = MIN(chunk_sz, byte_cnt - offset);
2386                 data = section + offset;
2387
2388                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2389                 if (error)
2390                         break;
2391         }
2392
2393         return error;
2394 }
2395
/*
 * DMA a single firmware chunk to the device via the service (SRVC)
 * flush-handler channel and sleep until the TX-done interrupt sets
 * sc_fw_chunk_done (up to 1 second).  Returns EBUSY if the NIC lock
 * cannot be taken, otherwise the sleep error (0 on success).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *chunk, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, chunk, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/*
	 * Destinations in the extended SRAM window need the extended
	 * address-space bit set while the transfer is in flight; it is
	 * cleared again at the bottom of this function.
	 */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Program the service channel: pause it, set source/destination
	 * addresses and length, mark the TFD buffer valid, then enable the
	 * channel to kick off the transfer.  The write order matters.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	if (!sc->sc_fw_chunk_done) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	/* Undo the extended address-space setting from above. */
	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		iwm_nic_unlock(sc);
	}

	return error;
}
2465
/*
 * Load the ucode sections belonging to one CPU of a family-8000
 * device, starting at *first_ucode_section.
 *
 * Scanning stops at a CPU1/CPU2 or paging separator section (or when
 * the section table is exhausted).  Each loaded section is reported to
 * the ucode through IWM_FH_UCODE_LOAD_STATUS.  On return,
 * *first_ucode_section holds the index where the scan stopped so the
 * caller can resume from there for the next CPU.
 */
int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, error = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU1 status lives in the low 16 bits, CPU2 in the high 16. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
		    i, offset, dlen, cpu);

		/* Sections must fit into the preallocated DMA segment. */
		if (dlen > sc->sc_fwdmasegsz) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "chunk %d too large (%d bytes)\n", i, dlen);
			error = EFBIG;
		} else {
			error = iwm_firmware_load_sect(sc, offset, data, dlen);
		}
		if (error) {
			device_printf(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, error);
			return error;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the ucode this CPU's sections are complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2546
2547 int
2548 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2549 {
2550         struct iwm_fw_sects *fws;
2551         int error = 0;
2552         int first_ucode_section;
2553
2554         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2555             ucode_type);
2556
2557         fws = &sc->sc_fw.fw_sects[ucode_type];
2558
2559         /* configure the ucode to be ready to get the secured image */
2560         /* release CPU reset */
2561         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2562
2563         /* load to FW the binary Secured sections of CPU1 */
2564         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2565         if (error)
2566                 return error;
2567
2568         /* load to FW the binary sections of CPU2 */
2569         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2570 }
2571
2572 static int
2573 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2574 {
2575         struct iwm_fw_sects *fws;
2576         int error, i;
2577         const void *data;
2578         uint32_t dlen;
2579         uint32_t offset;
2580
2581         sc->sc_uc.uc_intr = 0;
2582
2583         fws = &sc->sc_fw.fw_sects[ucode_type];
2584         for (i = 0; i < fws->fw_count; i++) {
2585                 data = fws->fw_sect[i].fws_data;
2586                 dlen = fws->fw_sect[i].fws_len;
2587                 offset = fws->fw_sect[i].fws_devoff;
2588                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2589                     "LOAD FIRMWARE type %d offset %u len %d\n",
2590                     ucode_type, offset, dlen);
2591                 if (dlen > sc->sc_fwdmasegsz) {
2592                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2593                             "chunk %d too large (%d bytes)\n", i, dlen);
2594                         error = EFBIG;
2595                 } else {
2596                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2597                 }
2598                 if (error) {
2599                         device_printf(sc->sc_dev,
2600                             "could not load firmware chunk %u of %u "
2601                             "(error=%d)\n", i, fws->fw_count, error);
2602                         return error;
2603                 }
2604         }
2605
2606         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2607
2608         return 0;
2609 }
2610
/*
 * Load the requested ucode image and wait for its "alive" interrupt.
 *
 * Dispatches to the family-specific loader, then sleeps in hz/10
 * slices (at most 10 of them) until the ISR sets sc_uc.uc_intr.
 * Returns the loader/sleep error; on failure for family-8000 devices
 * the secure-boot CPU status registers are dumped for diagnosis.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error, w;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		error = iwm_load_firmware_8000(sc, ucode_type);
	else
		error = iwm_load_firmware_7000(sc, ucode_type);
	if (error)
		return error;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}
	if (error || !sc->sc_uc.uc_ok) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
			/* Secure-boot status of both on-chip CPUs. */
			device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
			device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
	}

	/*
	 * Give the firmware some time to initialize.
	 * Accessing it too early causes errors.
	 */
	lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);

	return error;
}
2649
/*
 * Bring the NIC up and start the given ucode image: init the NIC,
 * clear the rfkill handshake bits, enable host interrupts, and
 * finally DMA the firmware image to the device.  Returns 0 on
 * success or the errno from the init/load step.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before touching the NIC. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2679
2680 static int
2681 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2682 {
2683         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2684                 .valid = htole32(valid_tx_ant),
2685         };
2686
2687         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2688             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2689 }
2690
2691 static int
2692 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2693 {
2694         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2695         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2696
2697         /* Set parameters */
2698         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2699         phy_cfg_cmd.calib_control.event_trigger =
2700             sc->sc_default_calib[ucode_type].event_trigger;
2701         phy_cfg_cmd.calib_control.flow_trigger =
2702             sc->sc_default_calib[ucode_type].flow_trigger;
2703
2704         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2705             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2706         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2707             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2708 }
2709
2710 static int
2711 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2712         enum iwm_ucode_type ucode_type)
2713 {
2714         enum iwm_ucode_type old_type = sc->sc_uc_current;
2715         int error;
2716
2717         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2718                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2719                         error);
2720                 return error;
2721         }
2722
2723         sc->sc_uc_current = ucode_type;
2724         error = iwm_start_fw(sc, ucode_type);
2725         if (error) {
2726                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2727                 sc->sc_uc_current = old_type;
2728                 return error;
2729         }
2730
2731         error = iwm_post_alive(sc);
2732         if (error) {
2733                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2734         }
2735         return error;
2736 }
2737
2738 /*
2739  * mvm misc bits
2740  */
2741
/*
 * Load and run the INIT ucode.
 *
 * With 'justnvm' set, only the NVM is read (to obtain the MAC
 * address) and the function returns early.  Otherwise BT coex
 * configuration, Smart FIFO setup, TX antenna configuration and the
 * PHY configuration command are sent, and we then sleep in 2 second
 * slices until the firmware posts its init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		device_printf(sc->sc_dev, "failed to load init firmware\n");
		return error;
	}

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);

		return 0;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", error);
		return error;
	}

	/* Init Smart FIFO. */
	error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (error != 0)
		return error;

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
				 0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
				 0, "iwminit", 2*hz);
#endif
		if (error) {
			device_printf(sc->sc_dev, "init complete failed: %d\n",
				sc->sc_init_complete);
			break;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
	    sc->sc_init_complete ? "" : "not ");

	return error;
}
2824
2825 /*
2826  * receive side
2827  */
2828
/*
 * (re)stock rx ring, called at init-time and at runtime
 *
 * Allocates a fresh jumbo-cluster mbuf, DMA-loads it through the
 * ring's spare map, then swaps the spare map with the slot's map so
 * the previous mapping becomes the next spare.  The RX descriptor
 * stores the 256-byte-aligned bus address shifted right by 8.
 * Returns 0 on success, ENOBUFS or a busdma errno on failure.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Drop the mapping of the mbuf being replaced, if any. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
2880
2881 #define IWM_RSSI_OFFSET 50
2882 static int
2883 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2884 {
2885         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2886         uint32_t agc_a, agc_b;
2887         uint32_t val;
2888
2889         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2890         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2891         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2892
2893         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2894         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2895         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2896
2897         /*
2898          * dBm = rssi dB - agc dB - constant.
2899          * Higher AGC (higher radio gain) means lower signal.
2900          */
2901         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2902         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2903         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2904
2905         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2906             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2907             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2908
2909         return max_rssi_dbm;
2910 }
2911
2912 /*
2913  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2914  * values are reported by the fw as positive values - need to negate
2915  * to obtain their dBM.  Account for missing antennas by replacing 0
2916  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2917  */
2918 static int
2919 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2920 {
2921         int energy_a, energy_b, energy_c, max_energy;
2922         uint32_t val;
2923
2924         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2925         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2926             IWM_RX_INFO_ENERGY_ANT_A_POS;
2927         energy_a = energy_a ? -energy_a : -256;
2928         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2929             IWM_RX_INFO_ENERGY_ANT_B_POS;
2930         energy_b = energy_b ? -energy_b : -256;
2931         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2932             IWM_RX_INFO_ENERGY_ANT_C_POS;
2933         energy_c = energy_c ? -energy_c : -256;
2934         max_energy = MAX(energy_a, energy_b);
2935         max_energy = MAX(max_energy, energy_c);
2936
2937         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2938             "energy In A %d B %d C %d , and max %d\n",
2939             energy_a, energy_b, energy_c, max_energy);
2940
2941         return max_energy;
2942 }
2943
2944 static void
2945 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2946         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2947 {
2948         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2949
2950         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2951         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2952
2953         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2954 }
2955
2956 /*
2957  * Retrieve the average noise (in dBm) among receivers.
2958  */
2959 static int
2960 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2961 {
2962         int i, total, nbant, noise;
2963
2964         total = nbant = noise = 0;
2965         for (i = 0; i < 3; i++) {
2966                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2967                 if (noise) {
2968                         total += noise;
2969                         nbant++;
2970                 }
2971         }
2972
2973         /* There should be at least one antenna but check anyway. */
2974         return (nbant == 0) ? -127 : (total / nbant) - 107;
2975 }
2976
2977 /*
2978  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2979  *
2980  * Handles the actual data of the Rx packet from the fw
2981  */
2982 static void
2983 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2984         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2985 {
2986         struct ieee80211com *ic = &sc->sc_ic;
2987         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2988         struct ieee80211_frame *wh;
2989         struct ieee80211_node *ni;
2990         struct ieee80211_rx_stats rxs;
2991         struct mbuf *m;
2992         struct iwm_rx_phy_info *phy_info;
2993         struct iwm_rx_mpdu_res_start *rx_res;
2994         uint32_t len;
2995         uint32_t rx_pkt_status;
2996         int rssi;
2997
2998         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2999
3000         phy_info = &sc->sc_last_phy_info;
3001         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3002         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3003         len = le16toh(rx_res->byte_count);
3004         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3005
3006         m = data->m;
3007         m->m_data = pkt->data + sizeof(*rx_res);
3008         m->m_pkthdr.len = m->m_len = len;
3009
3010         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3011                 device_printf(sc->sc_dev,
3012                     "dsp size out of range [0,20]: %d\n",
3013                     phy_info->cfg_phy_cnt);
3014                 return;
3015         }
3016
3017         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3018             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3019                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3020                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3021                 return; /* drop */
3022         }
3023
3024         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3025                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3026         } else {
3027                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3028         }
3029         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
3030         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
3031
3032         /* replenish ring for the buffer we're going to feed to the sharks */
3033         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3034                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3035                     __func__);
3036                 return;
3037         }
3038
3039         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3040
3041         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3042             "%s: phy_info: channel=%d, flags=0x%08x\n",
3043             __func__,
3044             le16toh(phy_info->channel),
3045             le16toh(phy_info->phy_flags));
3046
3047         /*
3048          * Populate an RX state struct with the provided information.
3049          */
3050         bzero(&rxs, sizeof(rxs));
3051         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3052         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3053         rxs.c_ieee = le16toh(phy_info->channel);
3054         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3055                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3056         } else {
3057                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3058         }
3059         rxs.rssi = rssi - sc->sc_noise;
3060         rxs.nf = sc->sc_noise;
3061
3062         if (ieee80211_radiotap_active_vap(vap)) {
3063                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3064
3065                 tap->wr_flags = 0;
3066                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3067                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3068                 tap->wr_chan_freq = htole16(rxs.c_freq);
3069                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3070                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3071                 tap->wr_dbm_antsignal = (int8_t)rssi;
3072                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3073                 tap->wr_tsft = phy_info->system_timestamp;
3074                 switch (phy_info->rate) {
3075                 /* CCK rates. */
3076                 case  10: tap->wr_rate =   2; break;
3077                 case  20: tap->wr_rate =   4; break;
3078                 case  55: tap->wr_rate =  11; break;
3079                 case 110: tap->wr_rate =  22; break;
3080                 /* OFDM rates. */
3081                 case 0xd: tap->wr_rate =  12; break;
3082                 case 0xf: tap->wr_rate =  18; break;
3083                 case 0x5: tap->wr_rate =  24; break;
3084                 case 0x7: tap->wr_rate =  36; break;
3085                 case 0x9: tap->wr_rate =  48; break;
3086                 case 0xb: tap->wr_rate =  72; break;
3087                 case 0x1: tap->wr_rate =  96; break;
3088                 case 0x3: tap->wr_rate = 108; break;
3089                 /* Unknown rate: should not happen. */
3090                 default:  tap->wr_rate =   0;
3091                 }
3092         }
3093
3094         IWM_UNLOCK(sc);
3095         if (ni != NULL) {
3096                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3097                 ieee80211_input_mimo(ni, m, &rxs);
3098                 ieee80211_free_node(ni);
3099         } else {
3100                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3101                 ieee80211_input_mimo_all(ic, m, &rxs);
3102         }
3103         IWM_LOCK(sc);
3104 }
3105
3106 static int
3107 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3108         struct iwm_node *in)
3109 {
3110         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3111         struct ieee80211_node *ni = &in->in_ni;
3112         struct ieee80211vap *vap = ni->ni_vap;
3113         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3114         int failack = tx_resp->failure_frame;
3115
3116         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3117
3118         /* Update rate control statistics. */
3119         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3120             __func__,
3121             (int) le16toh(tx_resp->status.status),
3122             (int) le16toh(tx_resp->status.sequence),
3123             tx_resp->frame_count,
3124             tx_resp->bt_kill_count,
3125             tx_resp->failure_rts,
3126             tx_resp->failure_frame,
3127             le32toh(tx_resp->initial_rate),
3128             (int) le16toh(tx_resp->wireless_media_time));
3129
3130         if (status != IWM_TX_STATUS_SUCCESS &&
3131             status != IWM_TX_STATUS_DIRECT_DONE) {
3132                 ieee80211_ratectl_tx_complete(vap, ni,
3133                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3134                 return (1);
3135         } else {
3136                 ieee80211_ratectl_tx_complete(vap, ni,
3137                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3138                 return (0);
3139         }
3140 }
3141
3142 static void
3143 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3144         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3145 {
3146         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3147         int idx = cmd_hdr->idx;
3148         int qid = cmd_hdr->qid;
3149         struct iwm_tx_ring *ring = &sc->txq[qid];
3150         struct iwm_tx_data *txd = &ring->data[idx];
3151         struct iwm_node *in = txd->in;
3152         struct mbuf *m = txd->m;
3153         int status;
3154
3155         KASSERT(txd->done == 0, ("txd not done"));
3156         KASSERT(txd->in != NULL, ("txd without node"));
3157         KASSERT(txd->m != NULL, ("txd without mbuf"));
3158
3159         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3160
3161         sc->sc_tx_timer = 0;
3162
3163         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3164
3165         /* Unmap and free mbuf. */
3166         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3167         bus_dmamap_unload(ring->data_dmat, txd->map);
3168
3169         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3170             "free txd %p, in %p\n", txd, txd->in);
3171         txd->done = 1;
3172         txd->m = NULL;
3173         txd->in = NULL;
3174
3175         ieee80211_tx_complete(&in->in_ni, m, status);
3176
3177         if (--ring->queued < IWM_TX_RING_LOMARK) {
3178                 sc->qfullmsk &= ~(1 << ring->qid);
3179                 if (sc->qfullmsk == 0) {
3180                         /*
3181                          * Well, we're in interrupt context, but then again
3182                          * I guess net80211 does all sorts of stunts in
3183                          * interrupt context, so maybe this is no biggie.
3184                          */
3185                         iwm_start(sc);
3186                 }
3187         }
3188 }
3189
3190 /*
3191  * transmit side
3192  */
3193
3194 /*
3195  * Process a "command done" firmware notification.  This is where we wakeup
3196  * processes waiting for a synchronous command completion.
3197  * from if_iwn
3198  */
3199 static void
3200 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3201 {
3202         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3203         struct iwm_tx_data *data;
3204
3205         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3206                 return; /* Not a command ack. */
3207         }
3208
3209         data = &ring->data[pkt->hdr.idx];
3210
3211         /* If the command was mapped in an mbuf, free it. */
3212         if (data->m != NULL) {
3213                 bus_dmamap_sync(ring->data_dmat, data->map,
3214                     BUS_DMASYNC_POSTWRITE);
3215                 bus_dmamap_unload(ring->data_dmat, data->map);
3216                 m_freem(data->m);
3217                 data->m = NULL;
3218         }
3219         wakeup(&ring->desc[pkt->hdr.idx]);
3220 }
3221
#if 0
/*
 * necessary only for block ack mode
 *
 * Writes the (sta_id, length) byte-count entry for TX descriptor `idx'
 * on queue `qid' into the scheduler byte-count table so the firmware
 * scheduler knows the frame size.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	/* With the DW_BC_TABLE capability, length is in dwords, not bytes. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * NOTE(review): presumably a duplicated shadow entry the firmware
	 * reads when the queue index wraps — confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3254
3255 /*
3256  * Take an 802.11 (non-n) rate, find the relevant rate
3257  * table entry.  return the index into in_ridx[].
3258  *
3259  * The caller then uses that index back into in_ridx
3260  * to figure out the rate index programmed /into/
3261  * the firmware for this given node.
3262  */
3263 static int
3264 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3265     uint8_t rate)
3266 {
3267         int i;
3268         uint8_t r;
3269
3270         for (i = 0; i < nitems(in->in_ridx); i++) {
3271                 r = iwm_rates[in->in_ridx[i]].rate;
3272                 if (rate == r)
3273                         return (i);
3274         }
3275         /* XXX Return the first */
3276         /* XXX TODO: have it return the /lowest/ */
3277         return (0);
3278 }
3279
3280 /*
3281  * Fill in the rate related information for a transmit command.
3282  */
3283 static const struct iwm_rate *
3284 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3285         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3286 {
3287         struct ieee80211com *ic = &sc->sc_ic;
3288         struct ieee80211_node *ni = &in->in_ni;
3289         const struct iwm_rate *rinfo;
3290         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3291         int ridx, rate_flags;
3292
3293         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3294         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3295
3296         /*
3297          * XXX TODO: everything about the rate selection here is terrible!
3298          */
3299
3300         if (type == IEEE80211_FC0_TYPE_DATA) {
3301                 int i;
3302                 /* for data frames, use RS table */
3303                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3304                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3305                 ridx = in->in_ridx[i];
3306
3307                 /* This is the index into the programmed table */
3308                 tx->initial_rate_index = i;
3309                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3310                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3311                     "%s: start with i=%d, txrate %d\n",
3312                     __func__, i, iwm_rates[ridx].rate);
3313         } else {
3314                 /*
3315                  * For non-data, use the lowest supported rate for the given
3316                  * operational mode.
3317                  *
3318                  * Note: there may not be any rate control information available.
3319                  * This driver currently assumes if we're transmitting data
3320                  * frames, use the rate control table.  Grr.
3321                  *
3322                  * XXX TODO: use the configured rate for the traffic type!
3323                  * XXX TODO: this should be per-vap, not curmode; as we later
3324                  * on we'll want to handle off-channel stuff (eg TDLS).
3325                  */
3326                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3327                         /*
3328                          * XXX this assumes the mode is either 11a or not 11a;
3329                          * definitely won't work for 11n.
3330                          */
3331                         ridx = IWM_RIDX_OFDM;
3332                 } else {
3333                         ridx = IWM_RIDX_CCK;
3334                 }
3335         }
3336
3337         rinfo = &iwm_rates[ridx];
3338
3339         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3340             __func__, ridx,
3341             rinfo->rate,
3342             !! (IWM_RIDX_IS_CCK(ridx))
3343             );
3344
3345         /* XXX TODO: hard-coded TX antenna? */
3346         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3347         if (IWM_RIDX_IS_CCK(ridx))
3348                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3349         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3350
3351         return rinfo;
3352 }
3353
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on TX ring `ac'.
 *
 * Builds the in-ring iwm_tx_cmd (rate selection, flags, power-management
 * timeouts, a copy of the 802.11 header), performs software crypto if
 * the frame is protected, DMA-maps the payload and fills the TFD's
 * scatter list, then advances the ring write pointer to hand the frame
 * to the firmware.
 *
 * On success the mbuf is owned by the ring (freed at TX completion);
 * on error the mbuf is freed here and an errno is returned.
 * Caller is expected to hold the softc lock (not asserted here).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and fill rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	/* Feed a copy of the outgoing frame to any radiotap listener. */
	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Large unicast data frames need RTS/CTS protection. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Pick the power-management frame timeout based on the management
	 * subtype; non-management frames get no PM timeout.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	/*
	 * DMA-map the payload; reserve 2 scatter entries for the command
	 * header/TX command which occupy tbs[0] and tbs[1].
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;

	/* tbs[0]/tbs[1]: the command header + TX command + 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Make the descriptor and command visible to the device. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3586
3587 static int
3588 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3589     const struct ieee80211_bpf_params *params)
3590 {
3591         struct ieee80211com *ic = ni->ni_ic;
3592         struct iwm_softc *sc = ic->ic_softc;
3593         int error = 0;
3594
3595         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3596             "->%s begin\n", __func__);
3597
3598         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3599                 m_freem(m);
3600                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3601                     "<-%s not RUNNING\n", __func__);
3602                 return (ENETDOWN);
3603         }
3604
3605         IWM_LOCK(sc);
3606         /* XXX fix this */
3607         if (params == NULL) {
3608                 error = iwm_tx(sc, m, ni, 0);
3609         } else {
3610                 error = iwm_tx(sc, m, ni, 0);
3611         }
3612         sc->sc_tx_timer = 5;
3613         IWM_UNLOCK(sc);
3614
3615         return (error);
3616 }
3617
3618 /*
3619  * mvm/tx.c
3620  */
3621
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 *
 * tfd_msk selects the TX queues to flush; sync selects whether we wait
 * for the flush command to complete.  Currently compiled out.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3649
3650 static int
3651 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3652         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3653 {
3654         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3655             cmd, status);
3656 }
3657
3658 /* send station add/update command to firmware */
3659 static int
3660 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3661 {
3662         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3663         int ret;
3664         uint32_t status;
3665
3666         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3667
3668         add_sta_cmd.sta_id = IWM_STATION_ID;
3669         add_sta_cmd.mac_id_n_color
3670             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3671                 IWM_DEFAULT_COLOR));
3672         if (!update) {
3673                 int ac;
3674                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3675                         add_sta_cmd.tfd_queue_msk |=
3676                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3677                 }
3678                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3679         }
3680         add_sta_cmd.add_modify = update ? 1 : 0;
3681         add_sta_cmd.station_flags_msk
3682             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3683         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3684         if (update)
3685                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3686
3687         status = IWM_ADD_STA_SUCCESS;
3688         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3689         if (ret)
3690                 return ret;
3691
3692         switch (status) {
3693         case IWM_ADD_STA_SUCCESS:
3694                 break;
3695         default:
3696                 ret = EIO;
3697                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3698                 break;
3699         }
3700
3701         return ret;
3702 }
3703
/* Register the BSS station with the firmware (initial add). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3709
/* Update an already-registered BSS station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3715
3716 static int
3717 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3718         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3719 {
3720         struct iwm_mvm_add_sta_cmd_v7 cmd;
3721         int ret;
3722         uint32_t status;
3723
3724         memset(&cmd, 0, sizeof(cmd));
3725         cmd.sta_id = sta->sta_id;
3726         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3727
3728         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3729         cmd.tid_disable_tx = htole16(0xffff);
3730
3731         if (addr)
3732                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3733
3734         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3735         if (ret)
3736                 return ret;
3737
3738         switch (status) {
3739         case IWM_ADD_STA_SUCCESS:
3740                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3741                     "%s: Internal station added.\n", __func__);
3742                 return 0;
3743         default:
3744                 device_printf(sc->sc_dev,
3745                     "%s: Add internal station failed, status=0x%x\n",
3746                     __func__, status);
3747                 ret = EIO;
3748                 break;
3749         }
3750         return ret;
3751 }
3752
3753 static int
3754 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3755 {
3756         int ret;
3757
3758         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3759         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3760
3761         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3762         if (ret)
3763                 return ret;
3764
3765         ret = iwm_mvm_add_int_sta_common(sc,
3766             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3767
3768         if (ret)
3769                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3770         return ret;
3771 }
3772
3773 static int
3774 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3775 {
3776         struct iwm_time_quota_cmd cmd;
3777         int i, idx, ret, num_active_macs, quota, quota_rem;
3778         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3779         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3780         uint16_t id;
3781
3782         memset(&cmd, 0, sizeof(cmd));
3783
3784         /* currently, PHY ID == binding ID */
3785         if (in) {
3786                 id = in->in_phyctxt->id;
3787                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3788                 colors[id] = in->in_phyctxt->color;
3789
3790                 if (1)
3791                         n_ifs[id] = 1;
3792         }
3793
3794         /*
3795          * The FW's scheduling session consists of
3796          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3797          * equally between all the bindings that require quota
3798          */
3799         num_active_macs = 0;
3800         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3801                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3802                 num_active_macs += n_ifs[i];
3803         }
3804
3805         quota = 0;
3806         quota_rem = 0;
3807         if (num_active_macs) {
3808                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3809                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3810         }
3811
3812         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3813                 if (colors[i] < 0)
3814                         continue;
3815
3816                 cmd.quotas[idx].id_and_color =
3817                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3818
3819                 if (n_ifs[i] <= 0) {
3820                         cmd.quotas[idx].quota = htole32(0);
3821                         cmd.quotas[idx].max_duration = htole32(0);
3822                 } else {
3823                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3824                         cmd.quotas[idx].max_duration = htole32(0);
3825                 }
3826                 idx++;
3827         }
3828
3829         /* Give the remainder of the session to the first binding */
3830         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3831
3832         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3833             sizeof(cmd), &cmd);
3834         if (ret)
3835                 device_printf(sc->sc_dev,
3836                     "%s: Failed to send quota: %d\n", __func__, ret);
3837         return ret;
3838 }
3839
3840 /*
3841  * ieee80211 routines
3842  */
3843
3844 /*
3845  * Change to AUTH state in 80211 state machine.  Roughly matches what
3846  * Linux does in bss_info_changed().
3847  */
3848 static int
3849 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3850 {
3851         struct ieee80211_node *ni;
3852         struct iwm_node *in;
3853         struct iwm_vap *iv = IWM_VAP(vap);
3854         uint32_t duration;
3855         int error;
3856
3857         /*
3858          * XXX i have a feeling that the vap node is being
3859          * freed from underneath us. Grr.
3860          */
3861         ni = ieee80211_ref_node(vap->iv_bss);
3862         in = IWM_NODE(ni);
3863         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3864             "%s: called; vap=%p, bss ni=%p\n",
3865             __func__,
3866             vap,
3867             ni);
3868
3869         in->in_assoc = 0;
3870
3871         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3872         if (error != 0)
3873                 return error;
3874
3875         error = iwm_allow_mcast(vap, sc);
3876         if (error) {
3877                 device_printf(sc->sc_dev,
3878                     "%s: failed to set multicast\n", __func__);
3879                 goto out;
3880         }
3881
3882         /*
3883          * This is where it deviates from what Linux does.
3884          *
3885          * Linux iwlwifi doesn't reset the nic each time, nor does it
3886          * call ctxt_add() here.  Instead, it adds it during vap creation,
3887          * and always does a mac_ctx_changed().
3888          *
3889          * The openbsd port doesn't attempt to do that - it reset things
3890          * at odd states and does the add here.
3891          *
3892          * So, until the state handling is fixed (ie, we never reset
3893          * the NIC except for a firmware failure, which should drag
3894          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3895          * contexts that are required), let's do a dirty hack here.
3896          */
3897         if (iv->is_uploaded) {
3898                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3899                         device_printf(sc->sc_dev,
3900                             "%s: failed to update MAC\n", __func__);
3901                         goto out;
3902                 }
3903                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3904                     in->in_ni.ni_chan, 1, 1)) != 0) {
3905                         device_printf(sc->sc_dev,
3906                             "%s: failed update phy ctxt\n", __func__);
3907                         goto out;
3908                 }
3909                 in->in_phyctxt = &sc->sc_phyctxt[0];
3910
3911                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3912                         device_printf(sc->sc_dev,
3913                             "%s: binding update cmd\n", __func__);
3914                         goto out;
3915                 }
3916                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3917                         device_printf(sc->sc_dev,
3918                             "%s: failed to update sta\n", __func__);
3919                         goto out;
3920                 }
3921         } else {
3922                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3923                         device_printf(sc->sc_dev,
3924                             "%s: failed to add MAC\n", __func__);
3925                         goto out;
3926                 }
3927                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3928                     in->in_ni.ni_chan, 1, 1)) != 0) {
3929                         device_printf(sc->sc_dev,
3930                             "%s: failed add phy ctxt!\n", __func__);
3931                         error = ETIMEDOUT;
3932                         goto out;
3933                 }
3934                 in->in_phyctxt = &sc->sc_phyctxt[0];
3935
3936                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3937                         device_printf(sc->sc_dev,
3938                             "%s: binding add cmd\n", __func__);
3939                         goto out;
3940                 }
3941                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3942                         device_printf(sc->sc_dev,
3943                             "%s: failed to add sta\n", __func__);
3944                         goto out;
3945                 }
3946         }
3947
3948         /*
3949          * Prevent the FW from wandering off channel during association
3950          * by "protecting" the session with a time event.
3951          */
3952         /* XXX duration is in units of TU, not MS */
3953         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3954         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3955         DELAY(100);
3956
3957         error = 0;
3958 out:
3959         ieee80211_free_node(ni);
3960         return (error);
3961 }
3962
3963 static int
3964 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3965 {
3966         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3967         int error;
3968
3969         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3970                 device_printf(sc->sc_dev,
3971                     "%s: failed to update STA\n", __func__);
3972                 return error;
3973         }
3974
3975         in->in_assoc = 1;
3976         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3977                 device_printf(sc->sc_dev,
3978                     "%s: failed to update MAC\n", __func__);
3979                 return error;
3980         }
3981
3982         return 0;
3983 }
3984
/*
 * Drop association state for node 'in' (may be NULL) by completely
 * resetting and re-initializing the device.  See the comment below
 * for why the "proper" incremental teardown is not used.
 * Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
	iwm_stop_device(sc);
	/*
	 * NOTE(review): the return value of iwm_init_hw() is ignored here;
	 * a failed re-init leaves the NIC down while we still report
	 * success -- confirm callers tolerate that.
	 */
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: the incremental teardown described above, kept for
	 * reference.  Note it calls iwm_mvm_rm_sta() twice. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4042
4043 static struct ieee80211_node *
4044 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4045 {
4046         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4047             M_INTWAIT | M_ZERO);
4048 }
4049
/*
 * Build the link-quality (rate selection) command for node 'in'
 * from its negotiated legacy rate set.  The resulting table is
 * stored in in->in_lq; the caller is responsible for sending it
 * to the firmware (IWM_LQ_CMD).  Not 11n aware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Refuse rate sets larger than the firmware table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): on a lookup miss in_ridx[i] keeps the
			 * memset value of -1, and the table-build loop below
			 * would then read iwm_rates[-1] -- confirm every
			 * negotiated rate is present in iwm_rates[].
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Round-robin over the valid tx antennas. */
		if (txant == 0)
			txant = iwm_fw_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4165
4166 static int
4167 iwm_media_change(struct ifnet *ifp)
4168 {
4169         struct ieee80211vap *vap = ifp->if_softc;
4170         struct ieee80211com *ic = vap->iv_ic;
4171         struct iwm_softc *sc = ic->ic_softc;
4172         int error;
4173
4174         error = ieee80211_media_change(ifp);
4175         if (error != ENETRESET)
4176                 return error;
4177
4178         IWM_LOCK(sc);
4179         if (ic->ic_nrunning > 0) {
4180                 iwm_stop(sc);
4181                 iwm_init(sc);
4182         }
4183         IWM_UNLOCK(sc);
4184         return error;
4185 }
4186
4187
/*
 * net80211 state-machine hook.  Performs the firmware work needed for
 * each target state (auth/assoc/run), then chains to the saved
 * net80211 newstate handler.  Note the deliberate lock ordering:
 * the IC lock is dropped and the driver lock taken for the duration
 * of the firmware commands, then swapped back before chaining.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release(). */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Locks must be swapped back to call into net80211. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Enable power save, beacon filtering and set tx rates. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Send the link-quality command built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the handler saved at vap creation time. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4315
4316 void
4317 iwm_endscan_cb(void *arg, int pending)
4318 {
4319         struct iwm_softc *sc = arg;
4320         struct ieee80211com *ic = &sc->sc_ic;
4321
4322         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4323             "%s: scan ended\n",
4324             __func__);
4325
4326         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4327 }
4328
4329 /*
4330  * Aging and idle timeouts for the different possible scenarios
4331  * in default configuration
4332  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{       /* single unicast: aging, idle */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{       /* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{       /* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{       /* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{       /* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4356
4357 /*
4358  * Aging and idle timeouts for the different possible scenarios
4359  * in single BSS MAC configuration.
4360  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{       /* single unicast: aging, idle */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{       /* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{       /* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{       /* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{       /* tx re-attempt */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4384
/*
 * Populate a Smart Fifo configuration command.  'ni' is the AP node
 * when associated, or NULL in unassociated mode; it selects the
 * FULL_ON watermark and the aging/idle timeout table used.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks disabled until rx MCS info is wired up. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: single-BSS table when associated, defaults otherwise. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4431
4432 static int
4433 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4434 {
4435         struct ieee80211com *ic = &sc->sc_ic;
4436         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4437         struct iwm_sf_cfg_cmd sf_cmd = {
4438                 .state = htole32(IWM_SF_FULL_ON),
4439         };
4440         int ret = 0;
4441
4442         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4443                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4444
4445         switch (new_state) {
4446         case IWM_SF_UNINIT:
4447         case IWM_SF_INIT_OFF:
4448                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4449                 break;
4450         case IWM_SF_FULL_ON:
4451                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4452                 break;
4453         default:
4454                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4455                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4456                           new_state);
4457                 return EINVAL;
4458         }
4459
4460         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4461                                    sizeof(sf_cmd), &sf_cmd);
4462         return ret;
4463 }
4464
4465 static int
4466 iwm_send_bt_init_conf(struct iwm_softc *sc)
4467 {
4468         struct iwm_bt_coex_cmd bt_cmd;
4469
4470         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4471         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4472
4473         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4474             &bt_cmd);
4475 }
4476
/*
 * Send an MCC (mobile country code / regulatory domain) update to the
 * firmware for the two-letter country 'alpha2', and (under IWM_DEBUG)
 * decode the response.  Returns 0 on success or the command error.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability decides which response layout we get back. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII letters into a big-endian-in-host u16. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 firmware takes a shorter command. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		/*
		 * NOTE(review): mcc is read without le16toh() here (and in
		 * the v1 branch below); harmless on little-endian hosts but
		 * confirm byte order on big-endian.
		 */
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4544
4545 static void
4546 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4547 {
4548         struct iwm_host_cmd cmd = {
4549                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4550                 .len = { sizeof(uint32_t), },
4551                 .data = { &backoff, },
4552         };
4553
4554         if (iwm_send_cmd(sc, &cmd) != 0) {
4555                 device_printf(sc->sc_dev,
4556                     "failed to change thermal tx backoff\n");
4557         }
4558 }
4559
/*
 * Full hardware/firmware bring-up: run the INIT firmware image, then
 * restart the device with the regular image and configure bluetooth
 * coex, antennas, phy db, phy contexts, power, regulatory, scan and
 * tx queues.  The sequence is order-critical.  On any failure after
 * the regular firmware load, the device is stopped again.
 * Returns 0 on success or the first error encountered.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT image for NVM/calibration. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Location-aware regulatory: ask the firmware for the domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4668
4669 /* Allow multicast from our BSSID. */
4670 static int
4671 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4672 {
4673         struct ieee80211_node *ni = vap->iv_bss;
4674         struct iwm_mcast_filter_cmd *cmd;
4675         size_t size;
4676         int error;
4677
4678         size = roundup(sizeof(*cmd), 4);
4679         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4680         if (cmd == NULL)
4681                 return ENOMEM;
4682         cmd->filter_own = 1;
4683         cmd->port_id = 0;
4684         cmd->count = 0;
4685         cmd->pass_all = 1;
4686         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4687
4688         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4689             IWM_CMD_SYNC, size, cmd);
4690         kfree(cmd, M_DEVBUF);
4691
4692         return (error);
4693 }
4694
4695 /*
4696  * ifnet interfaces
4697  */
4698
4699 static void
4700 iwm_init(struct iwm_softc *sc)
4701 {
4702         int error;
4703
4704         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4705                 return;
4706         }
4707         sc->sc_generation++;
4708         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4709
4710         if ((error = iwm_init_hw(sc)) != 0) {
4711                 kprintf("iwm_init_hw failed %d\n", error);
4712                 iwm_stop(sc);
4713                 return;
4714         }
4715
4716         /*
4717          * Ok, firmware loaded and we are jogging
4718          */
4719         sc->sc_flags |= IWM_FLAG_HW_INITED;
4720         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4721 }
4722
4723 static int
4724 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4725 {
4726         struct iwm_softc *sc;
4727         int error;
4728
4729         sc = ic->ic_softc;
4730
4731         IWM_LOCK(sc);
4732         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4733                 IWM_UNLOCK(sc);
4734                 return (ENXIO);
4735         }
4736         error = mbufq_enqueue(&sc->sc_snd, m);
4737         if (error) {
4738                 IWM_UNLOCK(sc);
4739                 return (error);
4740         }
4741         iwm_start(sc);
4742         IWM_UNLOCK(sc);
4743         return (0);
4744 }
4745
4746 /*
4747  * Dequeue packets from sendq and call send.
4748  */
4749 static void
4750 iwm_start(struct iwm_softc *sc)
4751 {
4752         struct ieee80211_node *ni;
4753         struct mbuf *m;
4754         int ac = 0;
4755
4756         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4757         while (sc->qfullmsk == 0 &&
4758                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4759                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4760                 if (iwm_tx(sc, m, ni, ac) != 0) {
4761                         if_inc_counter(ni->ni_vap->iv_ifp,
4762                             IFCOUNTER_OERRORS, 1);
4763                         ieee80211_free_node(ni);
4764                         continue;
4765                 }
4766                 sc->sc_tx_timer = 15;
4767         }
4768         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4769 }
4770
/*
 * Bring the driver and device down: clear the "initialized" flag so
 * other paths stop using the hardware, then quiesce and stop the device.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Bump the generation so in-flight work can detect the restart. */
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;	/* disarm the TX watchdog */
	iwm_stop_device(sc);
}
4782
4783 static void
4784 iwm_watchdog(void *arg)
4785 {
4786         struct iwm_softc *sc = arg;
4787
4788         if (sc->sc_tx_timer > 0) {
4789                 if (--sc->sc_tx_timer == 0) {
4790                         device_printf(sc->sc_dev, "device timeout\n");
4791 #ifdef IWM_DEBUG
4792                         iwm_nic_error(sc);
4793 #endif
4794                         iwm_stop(sc);
4795 #if defined(__DragonFly__)
4796                         ++sc->sc_ic.ic_oerrors;
4797 #else
4798                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4799 #endif
4800                         return;
4801                 }
4802         }
4803         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4804 }
4805
4806 static void
4807 iwm_parent(struct ieee80211com *ic)
4808 {
4809         struct iwm_softc *sc = ic->ic_softc;
4810         int startall = 0;
4811
4812         IWM_LOCK(sc);
4813         if (ic->ic_nrunning > 0) {
4814                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4815                         iwm_init(sc);
4816                         startall = 1;
4817                 }
4818         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4819                 iwm_stop(sc);
4820         IWM_UNLOCK(sc);
4821         if (startall)
4822                 ieee80211_start_all(ic);
4823 }
4824
4825 /*
4826  * The interrupt side of things
4827  */
4828
4829 /*
4830  * error dumping routines are from iwlwifi/mvm/utils.c
4831  */
4832
4833 /*
4834  * Note: This structure is read from the device with IO accesses,
4835  * and the reading already does the endian conversion. As it is
4836  * read with uint32_t-sized accesses, any members with a different size
4837  * need to be ordered correctly though!
4838  */
/*
 * Layout must stay in sync with the firmware's LOG_ERROR_TABLE_API_S_VER_3;
 * do not reorder or resize fields.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4886
4887 /*
4888  * UMAC error struct - relevant starting from family 8000 chip.
4889  * Note: This structure is read from the device with IO accesses,
4890  * and the reading already does the endian conversion. As it is
4891  * read with u32-sized accesses, any members with a different size
4892  * need to be ordered correctly though!
4893  */
/* UMAC firmware error log layout; consumed by iwm_nic_umac_error(). */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
4911
/* Byte offsets/sizes used when validating the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4914
4915 #ifdef IWM_DEBUG
4916 struct {
4917         const char *name;
4918         uint8_t num;
4919 } advanced_lookup[] = {
4920         { "NMI_INTERRUPT_WDG", 0x34 },
4921         { "SYSASSERT", 0x35 },
4922         { "UCODE_VERSION_MISMATCH", 0x37 },
4923         { "BAD_COMMAND", 0x38 },
4924         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4925         { "FATAL_ERROR", 0x3D },
4926         { "NMI_TRM_HW_ERR", 0x46 },
4927         { "NMI_INTERRUPT_TRM", 0x4C },
4928         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4929         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4930         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4931         { "NMI_INTERRUPT_HOST", 0x66 },
4932         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4933         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4934         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4935         { "ADVANCED_SYSASSERT", 0 },
4936 };
4937
4938 static const char *
4939 iwm_desc_lookup(uint32_t num)
4940 {
4941         int i;
4942
4943         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4944                 if (advanced_lookup[i].num == num)
4945                         return advanced_lookup[i].name;
4946
4947         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4948         return advanced_lookup[i].name;
4949 }
4950
/*
 * Dump the UMAC error log to the console.  The log address is the one
 * the firmware reported in its ALIVE response (family 8000+ firmware,
 * see struct iwm_umac_error_event_table above).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity-check the firmware-provided log pointer. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes its length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
4997
4998 /*
4999  * Support for dumping the error log seemed like a good idea ...
5000  * but it's mostly hex junk and the only sensible thing is the
5001  * hw/ucode revision (which we know anyway).  Since it's here,
5002  * I'll just leave it in, just in case e.g. the Intel guys want to
5003  * help us decipher some "ADVANCED_SYSASSERT" later.
5004  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check the log pointer the firmware gave us at ALIVE time. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes its length in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* valid == 0 means the firmware never logged an error. */
	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Dump the UMAC log too, if the firmware advertised one. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5077 #endif
5078
/*
 * Sync the RX buffer for CPU access and point _var_ at the response
 * payload, which starts immediately after the iwm_rx_packet header.
 * NOTE: both macros implicitly use the caller's 'ring' and 'data'
 * locals (see iwm_notif_intr()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Same as SYNC_RESP_STRUCT; the _len_ argument is currently unused. */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * The trailing semicolon was dropped from the definition: callers
 * terminate the statement themselves, and the embedded ';' would have
 * broken an unbraced if/else around the macro.
 */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
5092
5093 /*
5094  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5095  * Basic structure from if_iwn
5096  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Index of the most recently closed receive buffer (12-bit field). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 of qid marks firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = &sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * Firmware is up: the payload size tells apart the three
		 * ALIVE response layouts; record the error/log table
		 * pointers for the debug-dump routines.
		 */
		case IWM_MVM_ALIVE: {
			struct iwm_mvm_alive_resp_v1 *resp1;
			struct iwm_mvm_alive_resp_v2 *resp2;
			struct iwm_mvm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake anyone sleeping on firmware bring-up. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/*
			 * Copy the whole packet into the fixed-size command
			 * response buffer if someone is waiting on it.
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Two-letter country code, high byte first. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* Wake sleepers waiting for init-firmware completion. */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			break; }

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Defer the scan-end work to the taskqueue. */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
#if 0	/* XXX This would be a duplicate scan end call */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
#endif
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5433
/*
 * Primary interrupt handler.
 *
 * Reads the pending interrupt causes either from the in-memory ICT
 * table (when IWM_FLAG_USE_ICT is set) or directly from the
 * IWM_CSR_INT / IWM_CSR_FH_INT_STATUS registers, then dispatches:
 * firmware SW errors (dump state + restart all VAPs), fatal HW errors
 * (stop the device), firmware chunk-load completion, rfkill switch
 * changes, and RX notifications.  Interrupts are masked on entry and
 * re-enabled via iwm_restore_interrupts() on the way out.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;	/* NOTE(review): rv is assigned but never read */
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a stray interrupt arriving during/after detach. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* ICT entries are little-endian in DMA memory. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* nothing pending: spurious */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* ack this ICT slot */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the compressed ICT value into CSR_INT bit layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* interrupt was not for us */
	}

	/* Ack the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		/* Note: returns with interrupts still masked if vap is NULL. */
		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake up the firmware-load code waiting in the DMA path. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Only disable periodic ints if no real RX work is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring of notifications/frames. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5594
5595 /*
5596  * Autoconf glue-sniffing
5597  */
/* PCI vendor and device IDs of the supported Intel adapters. */
#define PCI_VENDOR_INTEL		0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

/*
 * Table mapping supported PCI device IDs to human-readable product
 * names; consulted by iwm_probe() to set the device description.
 */
static const struct iwm_devices {
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* marketing name */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5625
5626 static int
5627 iwm_probe(device_t dev)
5628 {
5629         int i;
5630
5631         for (i = 0; i < nitems(iwm_devices); i++) {
5632                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5633                     pci_get_device(dev) == iwm_devices[i].device) {
5634                         device_set_desc(dev, iwm_devices[i].name);
5635                         return (BUS_PROBE_DEFAULT);
5636                 }
5637         }
5638
5639         return (ENXIO);
5640 }
5641
5642 static int
5643 iwm_dev_check(device_t dev)
5644 {
5645         struct iwm_softc *sc;
5646
5647         sc = device_get_softc(dev);
5648
5649         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5650         switch (pci_get_device(dev)) {
5651         case PCI_PRODUCT_INTEL_WL_3160_1:
5652         case PCI_PRODUCT_INTEL_WL_3160_2:
5653                 sc->sc_fwname = "iwm3160fw";
5654                 sc->host_interrupt_operation_mode = 1;
5655                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5656                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5657                 return (0);
5658         case PCI_PRODUCT_INTEL_WL_3165_1:
5659         case PCI_PRODUCT_INTEL_WL_3165_2:
5660                 sc->sc_fwname = "iwm7265fw";
5661                 sc->host_interrupt_operation_mode = 0;
5662                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5663                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5664                 return (0);
5665         case PCI_PRODUCT_INTEL_WL_7260_1:
5666         case PCI_PRODUCT_INTEL_WL_7260_2:
5667                 sc->sc_fwname = "iwm7260fw";
5668                 sc->host_interrupt_operation_mode = 1;
5669                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5670                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5671                 return (0);
5672         case PCI_PRODUCT_INTEL_WL_7265_1:
5673         case PCI_PRODUCT_INTEL_WL_7265_2:
5674                 sc->sc_fwname = "iwm7265fw";
5675                 sc->host_interrupt_operation_mode = 0;
5676                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5677                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5678                 return (0);
5679         case PCI_PRODUCT_INTEL_WL_8260_1:
5680         case PCI_PRODUCT_INTEL_WL_8260_2:
5681                 sc->sc_fwname = "iwm8000Cfw";
5682                 sc->host_interrupt_operation_mode = 0;
5683                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5684                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5685                 return (0);
5686         default:
5687                 device_printf(dev, "unknown adapter type\n");
5688                 return ENXIO;
5689         }
5690 }
5691
5692 static int
5693 iwm_pci_attach(device_t dev)
5694 {
5695         struct iwm_softc *sc;
5696         int count, error, rid;
5697         uint16_t reg;
5698 #if defined(__DragonFly__)
5699         int irq_flags;
5700 #endif
5701
5702         sc = device_get_softc(dev);
5703
5704         /* Clear device-specific "PCI retry timeout" register (41h). */
5705         reg = pci_read_config(dev, 0x40, sizeof(reg));
5706         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5707
5708         /* Enable bus-mastering and hardware bug workaround. */
5709         pci_enable_busmaster(dev);
5710         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5711         /* if !MSI */
5712         if (reg & PCIM_STATUS_INTxSTATE) {
5713                 reg &= ~PCIM_STATUS_INTxSTATE;
5714         }
5715         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5716
5717         rid = PCIR_BAR(0);
5718         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5719             RF_ACTIVE);
5720         if (sc->sc_mem == NULL) {
5721                 device_printf(sc->sc_dev, "can't map mem space\n");
5722                 return (ENXIO);
5723         }
5724         sc->sc_st = rman_get_bustag(sc->sc_mem);
5725         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5726
5727         /* Install interrupt handler. */
5728         count = 1;
5729         rid = 0;
5730 #if defined(__DragonFly__)
5731         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5732         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5733 #else
5734         if (pci_alloc_msi(dev, &count) == 0)
5735                 rid = 1;
5736         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5737             (rid != 0 ? 0 : RF_SHAREABLE));
5738 #endif
5739         if (sc->sc_irq == NULL) {
5740                 device_printf(dev, "can't map interrupt\n");
5741                         return (ENXIO);
5742         }
5743 #if defined(__DragonFly__)
5744         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5745                                iwm_intr, sc, &sc->sc_ih,
5746                                &wlan_global_serializer);
5747 #else
5748         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5749             NULL, iwm_intr, sc, &sc->sc_ih);
5750 #endif
5751         if (sc->sc_ih == NULL) {
5752                 device_printf(dev, "can't establish interrupt");
5753 #if defined(__DragonFly__)
5754                 pci_release_msi(dev);
5755 #endif
5756                         return (ENXIO);
5757         }
5758         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5759
5760         return (0);
5761 }
5762
/*
 * Release the PCI resources acquired by iwm_pci_attach(): tear down
 * the interrupt handler, release the IRQ resource and MSI allocation,
 * and unmap the register window.  Safe to call with only some of the
 * resources present (e.g. after a partial attach failure).
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Clear so iwm_intr()'s detach guard and re-entry are safe. */
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
5785
5786
5787
/*
 * Newbus attach: set up locks, callouts and the task queue, attach the
 * PCI resources, identify the chip, perform the minimal hardware
 * wake-up needed on 8000-family parts, allocate all DMA rings, and
 * register an intrhook (iwm_preinit) to finish firmware-dependent
 * setup once interrupts are available.
 *
 * Returns 0 on success; on any failure everything allocated so far is
 * torn down via iwm_detach_local() and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
					-1, "iwm_taskq");
#else
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
	if (error != 0) {
		device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	/*
	 * We now start fiddling with the hardware
	 */
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/*
		 * Wait for the MAC clock.  iwm_poll_bit() is expected to
		 * return non-zero on success and 0 on timeout here —
		 * NOTE(review): confirm against its definition if it changes.
		 */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Fold a detected C-step into the stored hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Pre-assign IDs for the firmware PHY contexts. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware-dependent setup until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
5980
5981 static int
5982 iwm_is_valid_ether_addr(uint8_t *addr)
5983 {
5984         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5985
5986         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5987                 return (FALSE);
5988
5989         return (TRUE);
5990 }
5991
5992 static int
5993 iwm_update_edca(struct ieee80211com *ic)
5994 {
5995         struct iwm_softc *sc = ic->ic_softc;
5996
5997         device_printf(sc->sc_dev, "%s: called\n", __func__);
5998         return (0);
5999 }
6000
/*
 * Deferred attach, run from the config intrhook established in
 * iwm_attach() (i.e. once interrupts are enabled): start the hardware,
 * run the "init" firmware once to read the NVM (radio capabilities and
 * MAC address), then attach to net80211 and install the driver's
 * callbacks.  On failure the whole device is torn down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Boot the init ucode once to pull the NVM, then stop again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install driver methods on the freshly attached com structure. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* One-shot hook: always disestablish, success or failure. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6071
6072 /*
6073  * Attach the interface to 802.11 radiotap.
6074  */
6075 static void
6076 iwm_radiotap_attach(struct iwm_softc *sc)
6077 {
6078         struct ieee80211com *ic = &sc->sc_ic;
6079
6080         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6081             "->%s begin\n", __func__);
6082         ieee80211_radiotap_attach(ic,
6083             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6084                 IWM_TX_RADIOTAP_PRESENT,
6085             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6086                 IWM_RX_RADIOTAP_PRESENT);
6087         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6088             "->%s end\n", __func__);
6089 }
6090
6091 static struct ieee80211vap *
6092 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6093     enum ieee80211_opmode opmode, int flags,
6094     const uint8_t bssid[IEEE80211_ADDR_LEN],
6095     const uint8_t mac[IEEE80211_ADDR_LEN])
6096 {
6097         struct iwm_vap *ivp;
6098         struct ieee80211vap *vap;
6099
6100         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6101                 return NULL;
6102         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6103         vap = &ivp->iv_vap;
6104         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6105         vap->iv_bmissthreshold = 10;            /* override default */
6106         /* Override with driver methods. */
6107         ivp->iv_newstate = vap->iv_newstate;
6108         vap->iv_newstate = iwm_newstate;
6109
6110         ieee80211_ratectl_init(vap);
6111         /* Complete setup. */
6112         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6113             mac);
6114         ic->ic_opmode = opmode;
6115
6116         return vap;
6117 }
6118
6119 static void
6120 iwm_vap_delete(struct ieee80211vap *vap)
6121 {
6122         struct iwm_vap *ivp = IWM_VAP(vap);
6123
6124         ieee80211_ratectl_deinit(vap);
6125         ieee80211_vap_detach(vap);
6126         kfree(ivp, M_80211_VAP);
6127 }
6128
6129 static void
6130 iwm_scan_start(struct ieee80211com *ic)
6131 {
6132         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6133         struct iwm_softc *sc = ic->ic_softc;
6134         int error;
6135
6136         IWM_LOCK(sc);
6137         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6138                 error = iwm_mvm_umac_scan(sc);
6139         else
6140                 error = iwm_mvm_lmac_scan(sc);
6141         if (error != 0) {
6142                 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
6143                 IWM_UNLOCK(sc);
6144                 ieee80211_cancel_scan(vap);
6145         } else {
6146                 iwm_led_blink_start(sc);
6147                 IWM_UNLOCK(sc);
6148         }
6149 }
6150
6151 static void
6152 iwm_scan_end(struct ieee80211com *ic)
6153 {
6154         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6155         struct iwm_softc *sc = ic->ic_softc;
6156
6157         IWM_LOCK(sc);
6158         iwm_led_blink_stop(sc);
6159         if (vap->iv_state == IEEE80211_S_RUN)
6160                 iwm_mvm_led_enable(sc);
6161         IWM_UNLOCK(sc);
6162 }
6163
/*
 * net80211 update_mcast callback: intentionally empty — no multicast
 * filter programming is done by this driver.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

/*
 * net80211 set_channel callback: intentionally empty — channel changes
 * are handled elsewhere (scan / PHY context code), not via this hook.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}

/*
 * net80211 scan_curchan callback: intentionally empty — the firmware
 * performs per-channel dwell itself during hardware scans.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

/*
 * net80211 scan_mindwell callback: intentionally empty, same reason as
 * iwm_scan_curchan().
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	return;
}
6184
6185 void
6186 iwm_init_task(void *arg1)
6187 {
6188         struct iwm_softc *sc = arg1;
6189
6190         IWM_LOCK(sc);
6191         while (sc->sc_flags & IWM_FLAG_BUSY) {
6192 #if defined(__DragonFly__)
6193                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6194 #else
6195                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6196 #endif
6197 }
6198         sc->sc_flags |= IWM_FLAG_BUSY;
6199         iwm_stop(sc);
6200         if (sc->sc_ic.ic_nrunning > 0)
6201                 iwm_init(sc);
6202         sc->sc_flags &= ~IWM_FLAG_BUSY;
6203         wakeup(&sc->sc_flags);
6204         IWM_UNLOCK(sc);
6205 }
6206
6207 static int
6208 iwm_resume(device_t dev)
6209 {
6210         struct iwm_softc *sc = device_get_softc(dev);
6211         int do_reinit = 0;
6212         uint16_t reg;
6213
6214         /* Clear device-specific "PCI retry timeout" register (41h). */
6215         reg = pci_read_config(dev, 0x40, sizeof(reg));
6216         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6217         iwm_init_task(device_get_softc(dev));
6218
6219         IWM_LOCK(sc);
6220         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6221                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6222                 do_reinit = 1;
6223         }
6224         IWM_UNLOCK(sc);
6225
6226         if (do_reinit)
6227                 ieee80211_resume_all(&sc->sc_ic);
6228
6229         return 0;
6230 }
6231
6232 static int
6233 iwm_suspend(device_t dev)
6234 {
6235         int do_stop = 0;
6236         struct iwm_softc *sc = device_get_softc(dev);
6237
6238         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6239
6240         ieee80211_suspend_all(&sc->sc_ic);
6241
6242         if (do_stop) {
6243                 IWM_LOCK(sc);
6244                 iwm_stop(sc);
6245                 sc->sc_flags |= IWM_FLAG_SCANNING;
6246                 IWM_UNLOCK(sc);
6247         }
6248
6249         return (0);
6250 }
6251
/*
 * Common teardown used by both detach and attach-failure paths.
 * Releases everything iwm_attach()/iwm_preinit() set up, in reverse
 * dependency order: task queue, callouts, device, net80211 state
 * (only when do_net80211 is non-zero, i.e. after ieee80211_ifattach()
 * has run), DMA rings, firmware image, scheduler/ICT/KW/firmware DMA
 * memory, PCI resources, queued mbufs and finally the lock.
 * Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (sc->sc_tq) {
#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
		sc->sc_tq = NULL;
#endif
	}
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_free_sched(sc);
	/* DMA areas are freed only if they were actually allocated. */
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6305
6306 static int
6307 iwm_detach(device_t dev)
6308 {
6309         struct iwm_softc *sc = device_get_softc(dev);
6310
6311         return (iwm_detach_local(sc, 1));
6312 }
6313
/* Newbus method table for the iwm(4) PCI driver. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus; needs firmware(9), pci and wlan modules. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);