kernel/iwm: Add intr_config_hook description.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      wlan_serialize_enter()/exit() hacks (will be removable when we
123  *                                           do the device netif removal).
124  *      header file paths (DFly allows localized path specifications).
125  *      minor header file differences.
126  *
127  * Comprehensive list of adjustments for DragonFly #ifdef'd:
128  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
129  *      packet counters
130  *      RUNNING and OACTIVE tests
131  *      msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
132  *      mtx -> lk  (mtx functions -> lockmgr functions)
133  *      callout differences
134  *      taskqueue differences
135  *      iwm_start() and ifq differences
136  *      iwm_ioctl() differences
137  *      MSI differences
138  *      bus_setup_intr() differences
139  *      minor PCI config register naming differences
140  */
141 #include <sys/cdefs.h>
142 __FBSDID("$FreeBSD$");
143
144 #include <sys/param.h>
145 #include <sys/bus.h>
146 #include <sys/endian.h>
147 #include <sys/firmware.h>
148 #include <sys/kernel.h>
149 #include <sys/malloc.h>
150 #include <sys/mbuf.h>
151 #include <sys/mutex.h>
152 #include <sys/module.h>
153 #include <sys/proc.h>
154 #include <sys/rman.h>
155 #include <sys/socket.h>
156 #include <sys/sockio.h>
157 #include <sys/sysctl.h>
158 #include <sys/linker.h>
159
160 #include <machine/endian.h>
161
162 #include <bus/pci/pcivar.h>
163 #include <bus/pci/pcireg.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_arp.h>
170 #include <net/ethernet.h>
171 #include <net/if_dl.h>
172 #include <net/if_media.h>
173 #include <net/if_types.h>
174 #include <net/ifq_var.h>
175
176 #include <netinet/in.h>
177 #include <netinet/in_systm.h>
178 #include <netinet/if_ether.h>
179 #include <netinet/ip.h>
180
181 #include <netproto/802_11/ieee80211_var.h>
182 #include <netproto/802_11/ieee80211_regdomain.h>
183 #include <netproto/802_11/ieee80211_ratectl.h>
184 #include <netproto/802_11/ieee80211_radiotap.h>
185
186 #include "if_iwmreg.h"
187 #include "if_iwmvar.h"
188 #include "if_iwm_debug.h"
189 #include "if_iwm_util.h"
190 #include "if_iwm_binding.h"
191 #include "if_iwm_phy_db.h"
192 #include "if_iwm_mac_ctxt.h"
193 #include "if_iwm_phy_ctxt.h"
194 #include "if_iwm_time_event.h"
195 #include "if_iwm_power.h"
196 #include "if_iwm_scan.h"
197 #include "if_iwm_pcie_trans.h"
198
/*
 * Channel numbers that may be enabled by the device's NVM channel map,
 * in the order the NVM channel-flags entries index them (2.4 GHz
 * entries first, then 5 GHz).
 */
const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44 , 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
/* Count of the leading 2.4 GHz entries in iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS   14
208
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
        uint8_t rate;   /* rate in units of 500 Kbps (2 -> 1 Mbps) */
        uint8_t plcp;   /* PLCP signal value the firmware expects */
} iwm_rates[] = {
        /* CCK rates (indices 0..3, see IWM_RIDX_IS_CCK) */
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        /* OFDM rates (indices 4.., see IWM_RIDX_IS_OFDM) */
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
/* Index of the first CCK entry and the first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
235
236 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
237 static int      iwm_firmware_store_section(struct iwm_softc *,
238                                            enum iwm_ucode_type,
239                                            const uint8_t *, size_t);
240 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
241 static void     iwm_fw_info_free(struct iwm_fw_info *);
242 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
243 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
244 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
245                                      bus_size_t, bus_size_t);
246 static void     iwm_dma_contig_free(struct iwm_dma_info *);
247 static int      iwm_alloc_fwmem(struct iwm_softc *);
248 static void     iwm_free_fwmem(struct iwm_softc *);
249 static int      iwm_alloc_sched(struct iwm_softc *);
250 static void     iwm_free_sched(struct iwm_softc *);
251 static int      iwm_alloc_kw(struct iwm_softc *);
252 static void     iwm_free_kw(struct iwm_softc *);
253 static int      iwm_alloc_ict(struct iwm_softc *);
254 static void     iwm_free_ict(struct iwm_softc *);
255 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
257 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
258 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
259                                   int);
260 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
261 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
262 static void     iwm_enable_interrupts(struct iwm_softc *);
263 static void     iwm_restore_interrupts(struct iwm_softc *);
264 static void     iwm_disable_interrupts(struct iwm_softc *);
265 static void     iwm_ict_reset(struct iwm_softc *);
266 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
267 static void     iwm_stop_device(struct iwm_softc *);
268 static void     iwm_mvm_nic_config(struct iwm_softc *);
269 static int      iwm_nic_rx_init(struct iwm_softc *);
270 static int      iwm_nic_tx_init(struct iwm_softc *);
271 static int      iwm_nic_init(struct iwm_softc *);
272 static void     iwm_enable_txq(struct iwm_softc *, int, int);
273 static int      iwm_post_alive(struct iwm_softc *);
274 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
275                                    uint16_t, uint8_t *, uint16_t *);
276 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
277                                      uint16_t *);
278 static void     iwm_init_channel_map(struct iwm_softc *,
279                                      const uint16_t * const);
280 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281                                    const uint16_t *, const uint16_t *, uint8_t,
282                                    uint8_t);
283 struct iwm_nvm_section;
284 static int      iwm_parse_nvm_sections(struct iwm_softc *,
285                                        struct iwm_nvm_section *);
286 static int      iwm_nvm_init(struct iwm_softc *);
287 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
288                                         const uint8_t *, uint32_t);
289 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
290 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
291 static int      iwm_fw_alive(struct iwm_softc *, uint32_t);
292 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
293 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
294 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
295                                               enum iwm_ucode_type);
296 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
297 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
298 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
299 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
300                                             struct iwm_rx_phy_info *);
301 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
302                                       struct iwm_rx_packet *,
303                                       struct iwm_rx_data *);
304 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
305 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
306                                    struct iwm_rx_data *);
307 static void     iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
308                                          struct iwm_rx_packet *,
309                                          struct iwm_node *);
310 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
311                                   struct iwm_rx_data *);
312 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
313 #if 0
314 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
315                                  uint16_t);
316 #endif
317 static const struct iwm_rate *
318         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
319                         struct ieee80211_frame *, struct iwm_tx_cmd *);
320 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
321                        struct ieee80211_node *, int);
322 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
323                              const struct ieee80211_bpf_params *);
324 static void     iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
325                                              struct iwm_mvm_add_sta_cmd_v5 *);
326 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
327                                                 struct iwm_mvm_add_sta_cmd_v6 *,
328                                                 int *);
329 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
330                                        int);
331 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
332 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
333 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
334                                            struct iwm_int_sta *,
335                                            const uint8_t *, uint16_t, uint16_t);
336 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
337 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
338 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
339 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
340 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
341 static struct ieee80211_node *
342                 iwm_node_alloc(struct ieee80211vap *,
343                                const uint8_t[IEEE80211_ADDR_LEN]);
344 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
345 static int      iwm_media_change(struct ifnet *);
346 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
347 static void     iwm_endscan_cb(void *, int);
348 static int      iwm_init_hw(struct iwm_softc *);
349 static void     iwm_init(void *);
350 static void     iwm_init_locked(struct iwm_softc *);
351 #if defined(__DragonFly__)
352 static void     iwm_start(struct ifnet *,  struct ifaltq_subque *);
353 #else
354 static void     iwm_start(struct ifnet *);
355 #endif
356 static void     iwm_start_locked(struct ifnet *);
357 static void     iwm_stop(struct ifnet *, int);
358 static void     iwm_stop_locked(struct ifnet *);
359 static void     iwm_watchdog(void *);
360 #if defined(__DragonFly__)
361 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t, struct ucred *cred);
362 #else
363 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
364 #endif
365 #ifdef IWM_DEBUG
366 static const char *
367                 iwm_desc_lookup(uint32_t);
368 static void     iwm_nic_error(struct iwm_softc *);
369 #endif
370 static void     iwm_notif_intr(struct iwm_softc *);
371 static void     iwm_intr(void *);
372 static int      iwm_attach(device_t);
373 static void     iwm_preinit(void *);
374 static int      iwm_detach_local(struct iwm_softc *sc, int);
375 static void     iwm_init_task(void *);
376 static void     iwm_radiotap_attach(struct iwm_softc *);
377 static struct ieee80211vap *
378                 iwm_vap_create(struct ieee80211com *,
379                                const char [IFNAMSIZ], int,
380                                enum ieee80211_opmode, int,
381                                const uint8_t [IEEE80211_ADDR_LEN],
382                                const uint8_t [IEEE80211_ADDR_LEN]);
383 static void     iwm_vap_delete(struct ieee80211vap *);
384 static void     iwm_scan_start(struct ieee80211com *);
385 static void     iwm_scan_end(struct ieee80211com *);
386 static void     iwm_update_mcast(struct ifnet *);
387 static void     iwm_set_channel(struct ieee80211com *);
388 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
389 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
390 static int      iwm_detach(device_t);
391
#if defined(__DragonFly__)
/* Tunable hw.iwm.msi.enable: allow disabling MSI use (default: enabled). */
static int      iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

/*
 * This is a hack due to the wlan_serializer deadlocking sleepers.
 */
int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);

/*
 * Sleep on 'chan' with lockmgr lock 'lk', like lksleep(), but if the
 * caller holds the wlan serializer, drop it for the duration of the
 * sleep so other wlan work is not blocked (see the hack note above).
 *
 * Returns the lksleep() error code.
 */
int
iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
{
        int error;

        if (wlan_is_serialized()) {
                /*
                 * Drop the serializer before sleeping.  After lksleep()
                 * returns (with 'lk' held again), release 'lk' first,
                 * re-enter the serializer, then reacquire 'lk'.
                 * NOTE(review): the release/enter/relock ordering looks
                 * intended to keep "serializer before lk" lock order --
                 * confirm against the wlan serializer rules.
                 */
                wlan_serialize_exit();
                error = lksleep(chan, lk, flags, wmesg, to);
                lockmgr(lk, LK_RELEASE);
                wlan_serialize_enter();
                lockmgr(lk, LK_EXCLUSIVE);
        } else {
                error = lksleep(chan, lk, flags, wmesg, to);
        }
        return error;
}

#endif
420
421 /*
422  * Firmware parser.
423  */
424
425 static int
426 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
427 {
428         const struct iwm_fw_cscheme_list *l = (const void *)data;
429
430         if (dlen < sizeof(*l) ||
431             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
432                 return EINVAL;
433
434         /* we don't actually store anything for now, always use s/w crypto */
435
436         return 0;
437 }
438
439 static int
440 iwm_firmware_store_section(struct iwm_softc *sc,
441     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
442 {
443         struct iwm_fw_sects *fws;
444         struct iwm_fw_onesect *fwone;
445
446         if (type >= IWM_UCODE_TYPE_MAX)
447                 return EINVAL;
448         if (dlen < sizeof(uint32_t))
449                 return EINVAL;
450
451         fws = &sc->sc_fw.fw_sects[type];
452         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
453                 return EINVAL;
454
455         fwone = &fws->fw_sect[fws->fw_count];
456
457         /* first 32bit are device load offset */
458         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
459
460         /* rest is data */
461         fwone->fws_data = data + sizeof(uint32_t);
462         fwone->fws_len = dlen - sizeof(uint32_t);
463
464         fws->fw_count++;
465         fws->fw_totlen += fwone->fws_len;
466
467         return 0;
468 }
469
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB TLV payload: the ucode
 * image type (little-endian, see iwm_set_default_calib()) this default
 * calibration applies to, plus its calibration trigger control block.
 */
struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;
475
476 static int
477 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
478 {
479         const struct iwm_tlv_calib_data *def_calib = data;
480         uint32_t ucode_type = le32toh(def_calib->ucode_type);
481
482         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
483                 device_printf(sc->sc_dev,
484                     "Wrong ucode_type %u for default "
485                     "calibration.\n", ucode_type);
486                 return EINVAL;
487         }
488
489         sc->sc_default_calib[ucode_type].flow_trigger =
490             def_calib->calib.flow_trigger;
491         sc->sc_default_calib[ucode_type].event_trigger =
492             def_calib->calib.event_trigger;
493
494         return 0;
495 }
496
/*
 * Release a previously loaded firmware image: drop the firmware(9)
 * reference and clear the parsed section tables.  fw->fw_status is
 * deliberately left alone -- it tracks the load state that
 * iwm_read_firmware() sleeps on.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
        firmware_put(fw->fw_rawdata, FIRMWARE_UNLOAD);
        fw->fw_rawdata = NULL;
        fw->fw_rawsize = 0;
        /* don't touch fw->fw_status */
        memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
506
507 static int
508 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
509 {
510         struct iwm_fw_info *fw = &sc->sc_fw;
511         const struct iwm_tlv_ucode_header *uhdr;
512         struct iwm_ucode_tlv tlv;
513         enum iwm_ucode_tlv_type tlv_type;
514         const struct firmware *fwp;
515         const uint8_t *data;
516         int error = 0;
517         size_t len;
518
519         if (fw->fw_status == IWM_FW_STATUS_DONE &&
520             ucode_type != IWM_UCODE_TYPE_INIT)
521                 return 0;
522
523         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
524 #if defined(__DragonFly__)
525                 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
526 #else
527                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
528 #endif
529         }
530         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
531
532         if (fw->fw_rawdata != NULL)
533                 iwm_fw_info_free(fw);
534
535         /*
536          * Load firmware into driver memory.
537          * fw_rawdata and fw_rawsize will be set.
538          */
539         IWM_UNLOCK(sc);
540         fwp = firmware_get(sc->sc_fwname);
541         if (fwp == NULL) {
542                 device_printf(sc->sc_dev,
543                     "could not read firmware %s (error %d)\n",
544                     sc->sc_fwname, error);
545                 IWM_LOCK(sc);
546                 goto out;
547         }
548         IWM_LOCK(sc);
549         fw->fw_rawdata = fwp->data;
550         fw->fw_rawsize = fwp->datasize;
551
552         /*
553          * Parse firmware contents
554          */
555
556         uhdr = (const void *)fw->fw_rawdata;
557         if (*(const uint32_t *)fw->fw_rawdata != 0
558             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
559                 device_printf(sc->sc_dev, "invalid firmware %s\n",
560                     sc->sc_fwname);
561                 error = EINVAL;
562                 goto out;
563         }
564
565         sc->sc_fwver = le32toh(uhdr->ver);
566         data = uhdr->data;
567         len = fw->fw_rawsize - sizeof(*uhdr);
568
569         while (len >= sizeof(tlv)) {
570                 size_t tlv_len;
571                 const void *tlv_data;
572
573                 memcpy(&tlv, data, sizeof(tlv));
574                 tlv_len = le32toh(tlv.length);
575                 tlv_type = le32toh(tlv.type);
576
577                 len -= sizeof(tlv);
578                 data += sizeof(tlv);
579                 tlv_data = data;
580
581                 if (len < tlv_len) {
582                         device_printf(sc->sc_dev,
583                             "firmware too short: %zu bytes\n",
584                             len);
585                         error = EINVAL;
586                         goto parse_out;
587                 }
588
589                 switch ((int)tlv_type) {
590                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
591                         if (tlv_len < sizeof(uint32_t)) {
592                                 device_printf(sc->sc_dev,
593                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
594                                     __func__,
595                                     (int) tlv_len);
596                                 error = EINVAL;
597                                 goto parse_out;
598                         }
599                         sc->sc_capa_max_probe_len
600                             = le32toh(*(const uint32_t *)tlv_data);
601                         /* limit it to something sensible */
602                         if (sc->sc_capa_max_probe_len > (1<<16)) {
603                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
604                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
605                                     "ridiculous\n", __func__);
606                                 error = EINVAL;
607                                 goto parse_out;
608                         }
609                         break;
610                 case IWM_UCODE_TLV_PAN:
611                         if (tlv_len) {
612                                 device_printf(sc->sc_dev,
613                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
614                                     __func__,
615                                     (int) tlv_len);
616                                 error = EINVAL;
617                                 goto parse_out;
618                         }
619                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
620                         break;
621                 case IWM_UCODE_TLV_FLAGS:
622                         if (tlv_len < sizeof(uint32_t)) {
623                                 device_printf(sc->sc_dev,
624                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
625                                     __func__,
626                                     (int) tlv_len);
627                                 error = EINVAL;
628                                 goto parse_out;
629                         }
630                         /*
631                          * Apparently there can be many flags, but Linux driver
632                          * parses only the first one, and so do we.
633                          *
634                          * XXX: why does this override IWM_UCODE_TLV_PAN?
635                          * Intentional or a bug?  Observations from
636                          * current firmware file:
637                          *  1) TLV_PAN is parsed first
638                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
639                          * ==> this resets TLV_PAN to itself... hnnnk
640                          */
641                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
642                         break;
643                 case IWM_UCODE_TLV_CSCHEME:
644                         if ((error = iwm_store_cscheme(sc,
645                             tlv_data, tlv_len)) != 0) {
646                                 device_printf(sc->sc_dev,
647                                     "%s: iwm_store_cscheme(): returned %d\n",
648                                     __func__,
649                                     error);
650                                 goto parse_out;
651                         }
652                         break;
653                 case IWM_UCODE_TLV_NUM_OF_CPU:
654                         if (tlv_len != sizeof(uint32_t)) {
655                                 device_printf(sc->sc_dev,
656                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
657                                     __func__,
658                                     (int) tlv_len);
659                                 error = EINVAL;
660                                 goto parse_out;
661                         }
662                         if (le32toh(*(const uint32_t*)tlv_data) != 1) {
663                                 device_printf(sc->sc_dev,
664                                     "%s: driver supports "
665                                     "only TLV_NUM_OF_CPU == 1",
666                                     __func__);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         break;
671                 case IWM_UCODE_TLV_SEC_RT:
672                         if ((error = iwm_firmware_store_section(sc,
673                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
674                                 device_printf(sc->sc_dev,
675                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
676                                     __func__,
677                                     error);
678                                 goto parse_out;
679                         }
680                         break;
681                 case IWM_UCODE_TLV_SEC_INIT:
682                         if ((error = iwm_firmware_store_section(sc,
683                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
684                                 device_printf(sc->sc_dev,
685                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
686                                     __func__,
687                                     error);
688                                 goto parse_out;
689                         }
690                         break;
691                 case IWM_UCODE_TLV_SEC_WOWLAN:
692                         if ((error = iwm_firmware_store_section(sc,
693                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_DEF_CALIB:
702                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
703                                 device_printf(sc->sc_dev,
704                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
705                                     __func__,
706                                     (int) tlv_len,
707                                     (int) sizeof(struct iwm_tlv_calib_data));
708                                 error = EINVAL;
709                                 goto parse_out;
710                         }
711                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
712                                 device_printf(sc->sc_dev,
713                                     "%s: iwm_set_default_calib() failed: %d\n",
714                                     __func__,
715                                     error);
716                                 goto parse_out;
717                         }
718                         break;
719                 case IWM_UCODE_TLV_PHY_SKU:
720                         if (tlv_len != sizeof(uint32_t)) {
721                                 error = EINVAL;
722                                 device_printf(sc->sc_dev,
723                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
724                                     __func__,
725                                     (int) tlv_len);
726                                 goto parse_out;
727                         }
728                         sc->sc_fw_phy_config =
729                             le32toh(*(const uint32_t *)tlv_data);
730                         break;
731
732                 case IWM_UCODE_TLV_API_CHANGES_SET:
733                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
734                         /* ignore, not used by current driver */
735                         break;
736
737                 default:
738                         device_printf(sc->sc_dev,
739                             "%s: unknown firmware section %d, abort\n",
740                             __func__, tlv_type);
741                         error = EINVAL;
742                         goto parse_out;
743                 }
744
745                 len -= roundup(tlv_len, 4);
746                 data += roundup(tlv_len, 4);
747         }
748
749         KASSERT(error == 0, ("unhandled error"));
750
751  parse_out:
752         if (error) {
753                 device_printf(sc->sc_dev, "firmware parse error %d, "
754                     "section type %d\n", error, tlv_type);
755         }
756
757         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
758                 device_printf(sc->sc_dev,
759                     "device uses unsupported power ops\n");
760                 error = ENOTSUP;
761         }
762
763  out:
764         if (error) {
765                 fw->fw_status = IWM_FW_STATUS_NONE;
766                 if (fw->fw_rawdata != NULL)
767                         iwm_fw_info_free(fw);
768         } else
769                 fw->fw_status = IWM_FW_STATUS_DONE;
770         wakeup(&sc->sc_fw);
771
772         return error;
773 }
774
775 /*
776  * DMA resource routines
777  */
778
/*
 * bus_dmamap_load() callback: record the bus address of the (single)
 * DMA segment into the bus_addr_t pointed to by 'arg'.  All mappings
 * made through iwm_dma_contig_alloc() use exactly one segment.
 */
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	/* On load failure the caller sees the error; leave *arg untouched. */
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
787
/*
 * Allocate a physically contiguous DMA region of 'size' bytes with the
 * given alignment, restricted to 32-bit addressable space.  On success
 * dma->tag/map/vaddr/paddr are valid and the memory is zeroed
 * (BUS_DMA_ZERO).  On failure any partial state is torn down via
 * iwm_dma_contig_free() and an errno is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

#if defined(__DragonFly__)
	/* DragonFly's bus_dma_tag_create() has no lockfunc parameters. */
	error = bus_dma_tag_create(tag, alignment,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, size,
				   BUS_DMA_NOWAIT, &dma->tag);
#else
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
#endif
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* Resolve the bus address; stored into dma->paddr by the callback. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
832
/*
 * Undo iwm_dma_contig_alloc(): sync, unload and free the mapped memory,
 * then destroy the map and tag.  Safe to call on a partially initialized
 * iwm_dma_info (each step is guarded), so it doubles as the failure path.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
853
/*
 * fwmem is used to load firmware onto the card.
 * Allocates a staging buffer of sc_fwdmasegsz bytes in sc->fw_dma.
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
862
/* Release the firmware staging buffer allocated by iwm_alloc_fwmem(). */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
868
869 /* tx scheduler rings.  not used? */
870 static int
871 iwm_alloc_sched(struct iwm_softc *sc)
872 {
873         int rv;
874
875         /* TX scheduler rings must be aligned on a 1KB boundary. */
876         rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
877             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
878         return rv;
879 }
880
/* Release the TX scheduler DMA memory allocated by iwm_alloc_sched(). */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
886
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB page, 4KB-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
893
/* Release the keep-warm page allocated by iwm_alloc_kw(). */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
899
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the paddr shift used in iwm_ict_reset(). */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
907
/* Release the interrupt cause table allocated by iwm_alloc_ict(). */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
913
/*
 * Allocate the RX ring: the descriptor array, the shared RX status
 * area, the per-buffer DMA tag, and the RX buffers themselves (via
 * iwm_rx_addbuf()).  On any failure everything is released through
 * iwm_free_rx_ring() and an errno is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each slot is a 32-bit word; presumably the buffer address >> 8
	 * as programmed in iwm_nic_rx_init() — confirm against addbuf. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
977
978 static void
979 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
980 {
981         /* XXX conditional nic locks are stupid */
982         /* XXX print out if we can't lock the NIC? */
983         if (iwm_nic_lock(sc)) {
984                 /* XXX handle if RX stop doesn't finish? */
985                 (void) iwm_pcie_rx_stop(sc);
986                 iwm_nic_unlock(sc);
987         }
988         ring->cur = 0;
989
990         /*
991          * The hw rx ring index in shared memory must also be cleared,
992          * otherwise the discrepancy can cause reprocessing chaos.
993          */
994         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
995 }
996
/*
 * Free all RX ring resources: descriptor and status DMA regions, every
 * loaded RX mbuf and its map, and finally the buffer DMA tag.  Guarded
 * per-resource, so it is safe on a partially constructed ring.
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1025
/*
 * Allocate a TX ring: the TFD descriptor array for every queue and,
 * for queues up to the command queue, the command buffer area, the
 * TX buffer DMA tag and one DMA map per ring slot.  Also precomputes
 * each slot's command/scratch bus addresses.  On failure the ring is
 * released via iwm_free_tx_ring() and an errno is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, IWM_MAX_SCATTER - 1, MCLBYTES,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWM_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Walk the command area, one iwm_device_cmd per ring slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/* scratch lives inside the TX command, past its header */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1105
/*
 * Drop all pending frames from a TX ring without freeing its DMA
 * resources: unload and free every queued mbuf, zero the descriptors,
 * clear the queue-full bit for this qid and rewind the ring indices.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1130
/*
 * Free all TX ring resources: descriptor and command DMA regions,
 * every queued mbuf and per-slot DMA map, and the buffer DMA tag.
 * Guarded per-resource, so it is safe on a partially constructed ring.
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1159
1160 /*
1161  * High-level hardware frobbing routines
1162  */
1163
/*
 * Enable the default interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1170
/* Re-apply the last interrupt mask saved in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1176
/*
 * Mask all interrupts, then acknowledge anything already pending in
 * both the main and the flow-handler interrupt status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1187
/*
 * (Re)initialize the interrupt cause table (ICT) and switch the driver
 * into ICT interrupt mode.  Interrupts are disabled around the table
 * reset and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1210
1211 /* iwlwifi pcie/trans.c */
1212
1213 /*
1214  * Since this .. hard-resets things, it's time to actually
1215  * mark the first vap (if any) as having no mac context.
1216  * It's annoying, but since the driver is potentially being
1217  * stop/start'ed whilst active (thanks openbsd port!) we
1218  * have to correctly track this.
1219  */
/*
 * Bring the device down: disable interrupts, stop TX/RX DMA, reset all
 * rings, power down the busmaster clocks and the APM, and reset the
 * on-board processor.  The RF-kill interrupt is re-armed at the end so
 * we still see kill-switch changes while stopped.  See the comment
 * above about clearing the first vap's MAC context state.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1300
1301 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW interface configuration register from the hardware
 * revision and the radio configuration advertised by the firmware
 * (sc_fw_phy_config, parsed from the IWM_UCODE_TLV_PHY_SKU TLV).
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1341
/*
 * Program the RX DMA engine: clear status, point the hardware at the
 * descriptor ring and status area, and enable RX with a 4KB RB size.
 * Requires the NIC lock; returns EBUSY if it cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1405
/*
 * Program the TX side: deactivate the scheduler, point the hardware
 * at the keep-warm page and at every TX ring's descriptor array.
 * Requires the NIC lock; returns EBUSY if it cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1437
/*
 * Full NIC bring-up: APM/power init, HW config, then RX and TX engine
 * initialization, finally enabling shadow registers.  Returns the
 * first error from the RX/TX init steps, or 0 on success.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1463
/* Hardware TX FIFO numbers used by the scheduler. */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};

/*
 * Access-category to TX FIFO mapping.  NOTE(review): presumably indexed
 * by net80211 WME AC number — confirm against the callers; the FIFO
 * numbering above runs in the opposite priority order.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1478
/*
 * Activate TX queue 'qid' on the scheduler and bind it to hardware
 * FIFO 'fifo': deactivate, configure chaining/aggregation, zero the
 * read/write pointers and the SRAM queue context, set window size and
 * frame limit, then mark the queue active.  Silently returns (with a
 * message) if the NIC lock cannot be taken.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		/* Enable chaining for all non-command queues. */
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Zero the hardware write and read pointers. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1526
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the scheduler SRAM base, reset the ICT, clear scheduler context in
 * SRAM, program the scheduler ring base, enable the command queue and
 * all FH TX DMA channels, and re-enable L1-Active.  Returns EBUSY if
 * the NIC lock cannot be taken, or the first error encountered.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware and driver must agree on the scheduler SRAM base. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	/* NULL data means "write zeroes" here. */
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1584
1585 /*
1586  * NVM read access and content parsing.  We do not support
1587  * external NVM or writing NVM.
1588  * iwlwifi/mvm/nvm.c
1589  */
1590
1591 /* list of NVM sections we are allowed/need to read */
1592 const int nvm_to_read[] = {
1593         IWM_NVM_SECTION_TYPE_HW,
1594         IWM_NVM_SECTION_TYPE_SW,
1595         IWM_NVM_SECTION_TYPE_CALIBRATION,
1596         IWM_NVM_SECTION_TYPE_PRODUCTION,
1597 };
1598
1599 /* Default NVM size to read */
1600 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1601 #define IWM_MAX_NVM_SECTION_SIZE 7000
1602
1603 #define IWM_NVM_WRITE_OPCODE 1
1604 #define IWM_NVM_READ_OPCODE 0
1605
1606 static int
1607 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1608         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1609 {
1610         offset = 0;
1611         struct iwm_nvm_access_cmd nvm_access_cmd = {
1612                 .offset = htole16(offset),
1613                 .length = htole16(length),
1614                 .type = htole16(section),
1615                 .op_code = IWM_NVM_READ_OPCODE,
1616         };
1617         struct iwm_nvm_access_resp *nvm_resp;
1618         struct iwm_rx_packet *pkt;
1619         struct iwm_host_cmd cmd = {
1620                 .id = IWM_NVM_ACCESS_CMD,
1621                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1622                     IWM_CMD_SEND_IN_RFKILL,
1623                 .data = { &nvm_access_cmd, },
1624         };
1625         int ret, bytes_read, offset_read;
1626         uint8_t *resp_data;
1627
1628         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1629
1630         ret = iwm_send_cmd(sc, &cmd);
1631         if (ret)
1632                 return ret;
1633
1634         pkt = cmd.resp_pkt;
1635         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1636                 device_printf(sc->sc_dev,
1637                     "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1638                     __func__, pkt->hdr.flags);
1639                 ret = EIO;
1640                 goto exit;
1641         }
1642
1643         /* Extract NVM response */
1644         nvm_resp = (void *)pkt->data;
1645
1646         ret = le16toh(nvm_resp->status);
1647         bytes_read = le16toh(nvm_resp->length);
1648         offset_read = le16toh(nvm_resp->offset);
1649         resp_data = nvm_resp->data;
1650         if (ret) {
1651                 device_printf(sc->sc_dev,
1652                     "%s: NVM access command failed with status %d\n",
1653                     __func__, ret);
1654                 ret = EINVAL;
1655                 goto exit;
1656         }
1657
1658         if (offset_read != offset) {
1659                 device_printf(sc->sc_dev,
1660                     "%s: NVM ACCESS response with invalid offset %d\n",
1661                     __func__, offset_read);
1662                 ret = EINVAL;
1663                 goto exit;
1664         }
1665
1666         memcpy(data + offset, resp_data, bytes_read);
1667         *len = bytes_read;
1668
1669  exit:
1670         iwm_free_resp(sc, &cmd);
1671         return ret;
1672 }
1673
1674 /*
1675  * Reads an NVM section completely.
1676  * NICs prior to 7000 family doesn't have a real NVM, but just read
1677  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1678  * by uCode, we need to manually check in this case that we don't
1679  * overflow and try to read more than the EEPROM size.
1680  * For 7000 family NICs, we supply the maximal size we can read, and
1681  * the uCode fills the response with as much data as we can,
1682  * without overflowing, so no check is needed.
1683  */
1684 static int
1685 iwm_nvm_read_section(struct iwm_softc *sc,
1686         uint16_t section, uint8_t *data, uint16_t *len)
1687 {
1688         uint16_t length, seglen;
1689         int error;
1690
1691         /* Set nvm section read length */
1692         length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1693         *len = 0;
1694
1695         /* Read the NVM until exhausted (reading less than requested) */
1696         while (seglen == length) {
1697                 error = iwm_nvm_read_chunk(sc,
1698                     section, *len, length, data, &seglen);
1699                 if (error) {
1700                         device_printf(sc->sc_dev,
1701                             "Cannot read NVM from section "
1702                             "%d offset %d, length %d\n",
1703                             section, *len, length);
1704                         return error;
1705                 }
1706                 *len += seglen;
1707         }
1708
1709         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1710             "NVM section %d read completed\n", section);
1711         return 0;
1712 }
1713
1714 /*
1715  * BEGIN IWM_NVM_PARSE
1716  */
1717
1718 /* iwlwifi/iwl-nvm-parse.c */
1719
/*
 * NVM offsets (in 16-bit words).  SW/calibration entries are relative
 * to the start of their section; IWM_NVM_CHANNELS and IWM_XTAL_CALIB
 * encode an absolute word address minus the section base.
 */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address; stored 16-bit LE swapped */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1737
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),	/* 2.4GHz band */
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),	/* 5.2GHz band */
	/* 11n capability; note iwm_parse_nvm_data() currently forces it off */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),	/* 11ac capability */
};
1745
/*
 * Radio config bits (actual values from NVM definition).
 * Arguments are fully parenthesized so the macros are safe when passed
 * arbitrary expressions (the previous forms expanded 'x' bare).
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF) /* bits 12-15 */
1753
1754 #define DEFAULT_MAX_TX_POWER 16
1755
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Only VALID and ACTIVE are consulted by the channel-map code in this
 * file; the remaining bits are kept for parity with iwlwifi.
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1779
1780 /*
1781  * Add a channel to the net80211 channel list.
1782  *
1783  * ieee is the ieee channel number
1784  * ch_idx is channel index.
1785  * mode is the channel mode - CHAN_A, CHAN_B, CHAN_G.
1786  * ch_flags is the iwm channel flags.
1787  *
1788  * Return 0 on OK, < 0 on error.
1789  */
1790 static int
1791 iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
1792     int mode, uint16_t ch_flags)
1793 {
1794         /* XXX for now, no overflow checking! */
1795         struct ieee80211com *ic =  sc->sc_ic;
1796         int is_5ghz, flags;
1797         struct ieee80211_channel *channel;
1798
1799         channel = &ic->ic_channels[ic->ic_nchans++];
1800         channel->ic_ieee = ieee;
1801
1802         is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
1803         if (!is_5ghz) {
1804                 flags = IEEE80211_CHAN_2GHZ;
1805                 channel->ic_flags = mode;
1806         } else {
1807                 flags = IEEE80211_CHAN_5GHZ;
1808                 channel->ic_flags = mode;
1809         }
1810         channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
1811
1812         if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
1813                 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
1814         return (0);
1815 }
1816
/*
 * Walk the NVM channel-flags table and populate the net80211 channel
 * list.  5GHz entries are skipped when the SKU disables the 5.2GHz
 * band; 2GHz channels are added as 11b and (except channel 13) also
 * as 11g.  The resulting list is sorted before returning.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic =  sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	uint16_t ch_flags;
	int hw_value;

	for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Force 5GHz channels invalid when the SKU disables the band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		hw_value = iwm_nvm_channels[ch_idx];

		/* 5GHz? */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
			(void) iwm_init_net80211_channel(sc, hw_value,
			    ch_idx,
			    IEEE80211_CHAN_A,
			    ch_flags);
		} else {
			(void) iwm_init_net80211_channel(sc, hw_value,
			    ch_idx,
			    IEEE80211_CHAN_B,
			    ch_flags);
			/* If it's not channel 13, also add 11g */
			if (hw_value != 13)
				(void) iwm_init_net80211_channel(sc, hw_value,
				    ch_idx,
				    IEEE80211_CHAN_G,
				    ch_flags);
		}

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    iwm_nvm_channels[ch_idx],
		    ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}
1873
/*
 * Parse the HW, SW and calibration NVM sections into sc->sc_nvm:
 * NVM version, radio configuration, SKU capabilities, crystal
 * calibration words, the (byte-swapped) MAC address, and the channel
 * map.  tx_chains/rx_chains are accepted for interface parity with
 * iwlwifi but are not used here.
 *
 * Returns 0, or EINVAL when the NVM advertises no valid TX or RX
 * antennas.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio config word into its bit fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of the NVM's SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		device_printf(sc->sc_dev,
		    "%s: invalid antennas (0x%x, 0x%x)\n",
		    __func__, data->valid_tx_ant,
		    data->valid_rx_ant);
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): xtal_calib words are copied raw, without
	 * le16_to_cpup(), unlike every other field -- this matches
	 * iwlwifi, but verify on big-endian hosts.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1929
1930 /*
1931  * END NVM PARSE
1932  */
1933
/*
 * One NVM section as read from the firmware: a heap copy of the raw
 * section bytes plus its length (filled in by iwm_nvm_init()).
 */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	const uint8_t *data;	/* section contents; NULL if never read */
};
1938
/*
 * Check that the required NVM sections are present and hand their
 * contents to iwm_parse_nvm_data().  Returns ENOENT when the SW or HW
 * section is missing, otherwise the parse result.
 *
 * NOTE(review): the CALIBRATION section pointer is not checked here
 * even though iwm_parse_nvm_data() dereferences it -- this relies on
 * the caller having read it (it is listed in nvm_to_read); confirm.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib;

	/* Checking for required sections */
	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
		device_printf(sc->sc_dev,
		    "%s: Can't parse empty NVM sections\n",
		    __func__);
		return ENOENT;
	}

	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	return iwm_parse_nvm_data(sc, hw, sw, calib,
	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
1959
1960 static int
1961 iwm_nvm_init(struct iwm_softc *sc)
1962 {
1963         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1964         int i, section, error;
1965         uint16_t len;
1966         uint8_t *nvm_buffer, *temp;
1967
1968         /* Read From FW NVM */
1969         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1970             "%s: Read NVM\n",
1971             __func__);
1972
1973         /* TODO: find correct NVM max size for a section */
1974         nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1975         if (nvm_buffer == NULL)
1976                 return (ENOMEM);
1977         for (i = 0; i < nitems(nvm_to_read); i++) {
1978                 section = nvm_to_read[i];
1979                 KASSERT(section <= nitems(nvm_sections),
1980                     ("too many sections"));
1981
1982                 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1983                 if (error)
1984                         break;
1985
1986                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1987                 if (temp == NULL) {
1988                         error = ENOMEM;
1989                         break;
1990                 }
1991                 memcpy(temp, nvm_buffer, len);
1992                 nvm_sections[section].data = temp;
1993                 nvm_sections[section].length = len;
1994         }
1995         kfree(nvm_buffer, M_DEVBUF);
1996         if (error)
1997                 return error;
1998
1999         return iwm_parse_nvm_sections(sc, nvm_sections);
2000 }
2001
2002 /*
2003  * Firmware loading gunk.  This is kind of a weird hybrid between the
2004  * iwn driver and the Linux iwlwifi driver.
2005  */
2006
/*
 * Load one firmware chunk into device memory at dst_addr via the FH
 * service DMA channel, then sleep until the transfer-complete wakeup
 * (sc_fw_chunk_done set by the interrupt path) or a timeout/error from
 * the sleep.  Returns 0, EBUSY if the NIC could not be locked, or the
 * sleep's errno.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, program source/destination/size, then start. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	return error;
}
2057
/*
 * Push every section of the requested ucode image to the device, then
 * release the CPU reset and wait (up to ~1s total) for the firmware's
 * alive interrupt to set sc_uc.uc_intr.  Returns 0 on success or the
 * errno from chunk loading / the final sleep.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error, i, w;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	sc->sc_uc.uc_intr = 0;

	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
		    "LOAD FIRMWARE type %d offset %u len %d\n",
		    ucode_type, offset, dlen);
		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: chunk %u of %u returned error %02d\n",
			    __func__, i, fws->fw_count, error);
			return error;
		}
	}

	/* wait for the firmware to load */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	/* Poll in hz/10 slices; the rx path wakes us on the alive intr. */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}

	return error;
}
2099
/* iwlwifi: pcie/trans.c */
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts and load the requested ucode image.  Returns 0 or the
 * errno from NIC init / firmware load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupt causes first. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2130
/*
 * Hook run once the firmware has reported "alive".  The sched_base
 * argument is currently unused: iwm_post_alive() reads the scheduler
 * base from the softc instead.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2136
2137 static int
2138 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2139 {
2140         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2141                 .valid = htole32(valid_tx_ant),
2142         };
2143
2144         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2145             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2146 }
2147
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the calibration triggers that match
 * the currently running ucode image (sc_uc_current) to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	/* Calibration triggers are per-ucode-image defaults. */
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2167
2168 static int
2169 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2170         enum iwm_ucode_type ucode_type)
2171 {
2172         enum iwm_ucode_type old_type = sc->sc_uc_current;
2173         int error;
2174
2175         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2176                 kprintf("iwm_read_firmweare: failed %d\n",
2177                         error);
2178                 return error;
2179         }
2180
2181         sc->sc_uc_current = ucode_type;
2182         error = iwm_start_fw(sc, ucode_type);
2183         if (error) {
2184                 kprintf("iwm_start_fw: failed %d\n", error);
2185                 sc->sc_uc_current = old_type;
2186                 return error;
2187         }
2188
2189         error = iwm_fw_alive(sc, sc->sched_base);
2190         if (error) {
2191                 kprintf("iwm_fw_alive: failed %d\n", error);
2192         }
2193         return error;
2194 }
2195
2196 /*
2197  * mvm misc bits
2198  */
2199
2200 /*
2201  * follows iwlwifi/fw.c
2202  */
/*
 * Boot the INIT ucode image and either (justnvm != 0) just read and
 * parse the NVM and allocate the scan command buffer, or run the full
 * internal calibration sequence and wait for the init-complete
 * notification from the firmware.  Returns 0 or an errno (EPERM when
 * hardware rfkill blocks a full init).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		kprintf("iwm_mvm_load_ucode_wait_alive: failed %d\n",
			error);
		return error;
	}

	if (justnvm) {
		/* NVM only: fetch MAC address and size the scan command. */
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->sc_nvm.hw_addr);

		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_INTWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware (2s timeout per sleep).
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
				 0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
				 0, "iwminit", 2*hz);
#endif
		if (error) {
			kprintf("init complete failed %d\n",
				sc->sc_init_complete);
			break;
		}
	}

	return error;
}
2280
2281 /*
2282  * receive side
2283  */
2284
2285 /* (re)stock rx ring, called at init-time and at runtime */
2286 static int
2287 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2288 {
2289         struct iwm_rx_ring *ring = &sc->rxq;
2290         struct iwm_rx_data *data = &ring->data[idx];
2291         struct mbuf *m;
2292         int error;
2293         bus_addr_t paddr;
2294
2295         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2296         if (m == NULL)
2297                 return ENOBUFS;
2298
2299         if (data->m != NULL)
2300                 bus_dmamap_unload(ring->data_dmat, data->map);
2301
2302         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2303         error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2304         if (error != 0) {
2305                 device_printf(sc->sc_dev,
2306                     "%s: could not create RX buf DMA map, error %d\n",
2307                     __func__, error);
2308                 goto fail;
2309         }
2310         data->m = m;
2311         error = bus_dmamap_load(ring->data_dmat, data->map,
2312             mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2313             &paddr, BUS_DMA_NOWAIT);
2314         if (error != 0 && error != EFBIG) {
2315                 device_printf(sc->sc_dev,
2316                     "%s: can't not map mbuf, error %d\n", __func__,
2317                     error);
2318                 goto fail;
2319         }
2320         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2321
2322         /* Update RX descriptor. */
2323         KKASSERT((paddr & 255) == 0);
2324         ring->desc[idx] = htole32(paddr >> 8);
2325         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2326             BUS_DMASYNC_PREWRITE);
2327
2328         return 0;
2329 fail:
2330         return error;
2331 }
2332
2333 /* iwlwifi: mvm/rx.c */
2334 #define IWM_RSSI_OFFSET 50
2335 static int
2336 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2337 {
2338         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2339         uint32_t agc_a, agc_b;
2340         uint32_t val;
2341
2342         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2343         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2344         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2345
2346         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2347         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2348         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2349
2350         /*
2351          * dBm = rssi dB - agc dB - constant.
2352          * Higher AGC (higher radio gain) means lower signal.
2353          */
2354         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2355         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2356         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2357
2358         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2359             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2360             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2361
2362         return max_rssi_dbm;
2363 }
2364
2365 /* iwlwifi: mvm/rx.c */
2366 /*
2367  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2368  * values are reported by the fw as positive values - need to negate
2369  * to obtain their dBM.  Account for missing antennas by replacing 0
2370  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2371  */
2372 static int
2373 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2374 {
2375         int energy_a, energy_b, energy_c, max_energy;
2376         uint32_t val;
2377
2378         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2379         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2380             IWM_RX_INFO_ENERGY_ANT_A_POS;
2381         energy_a = energy_a ? -energy_a : -256;
2382         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2383             IWM_RX_INFO_ENERGY_ANT_B_POS;
2384         energy_b = energy_b ? -energy_b : -256;
2385         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2386             IWM_RX_INFO_ENERGY_ANT_C_POS;
2387         energy_c = energy_c ? -energy_c : -256;
2388         max_energy = MAX(energy_a, energy_b);
2389         max_energy = MAX(max_energy, energy_c);
2390
2391         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2392             "energy In A %d B %d C %d , and max %d\n",
2393             energy_a, energy_b, energy_c, max_energy);
2394
2395         return max_energy;
2396 }
2397
2398 static void
2399 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2400         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2401 {
2402         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2403
2404         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2405         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2406
2407         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2408 }
2409
2410 /*
2411  * Retrieve the average noise (in dBm) among receivers.
2412  */
2413 static int
2414 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2415 {
2416         int i, total, nbant, noise;
2417
2418         total = nbant = noise = 0;
2419         for (i = 0; i < 3; i++) {
2420                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2421                 if (noise) {
2422                         total += noise;
2423                         nbant++;
2424                 }
2425         }
2426
2427         /* There should be at least one antenna but check anyway. */
2428         return (nbant == 0) ? -127 : (total / nbant) - 107;
2429 }
2430
2431 /*
2432  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2433  *
2434  * Handles the actual data of the Rx packet from the fw
2435  */
2436 static void
2437 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2438         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2439 {
2440         struct ieee80211com *ic = sc->sc_ic;
2441         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2442         struct ieee80211_frame *wh;
2443         struct ieee80211_node *ni;
2444         struct ieee80211_rx_stats rxs;
2445         struct mbuf *m;
2446         struct iwm_rx_phy_info *phy_info;
2447         struct iwm_rx_mpdu_res_start *rx_res;
2448         uint32_t len;
2449         uint32_t rx_pkt_status;
2450         int rssi;
2451
2452         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2453
2454         phy_info = &sc->sc_last_phy_info;
2455         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2456         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2457         len = le16toh(rx_res->byte_count);
2458         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2459
2460         m = data->m;
2461         m->m_data = pkt->data + sizeof(*rx_res);
2462         m->m_pkthdr.len = m->m_len = len;
2463
2464         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2465                 device_printf(sc->sc_dev,
2466                     "dsp size out of range [0,20]: %d\n",
2467                     phy_info->cfg_phy_cnt);
2468                 return;
2469         }
2470
2471         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2472             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2473                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2474                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2475                 return; /* drop */
2476         }
2477
2478         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2479                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2480         } else {
2481                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2482         }
2483         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2484         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2485
2486         /* replenish ring for the buffer we're going to feed to the sharks */
2487         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2488                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2489                     __func__);
2490                 return;
2491         }
2492
2493         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2494
2495         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2496             "%s: phy_info: channel=%d, flags=0x%08x\n",
2497             __func__,
2498             le16toh(phy_info->channel),
2499             le16toh(phy_info->phy_flags));
2500
2501         /*
2502          * Populate an RX state struct with the provided information.
2503          */
2504         bzero(&rxs, sizeof(rxs));
2505 #if !defined(__DragonFly__)
2506         /* requires new fbsd stack */
2507         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2508 #endif
2509         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2510 #if defined(__DragonFly__)
2511         uint16_t c_freq;
2512         uint8_t c_ieee;
2513         c_ieee = le16toh(phy_info->channel);
2514         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2515                 c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_2GHZ);
2516         } else {
2517                 c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_5GHZ);
2518         }
2519 #else
2520         rxs.c_ieee = le16toh(phy_info->channel);
2521         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2522                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2523         } else {
2524                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2525         }
2526 #endif
2527         rxs.rssi = rssi - sc->sc_noise;
2528         rxs.nf = sc->sc_noise;
2529
2530         if (ieee80211_radiotap_active_vap(vap)) {
2531                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2532
2533                 tap->wr_flags = 0;
2534                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2535                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2536 #if defined(__DragonFly__)
2537                 tap->wr_chan_freq = htole16(c_freq);
2538 #else
2539                 tap->wr_chan_freq = htole16(rxs.c_freq);
2540 #endif
2541                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2542                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2543                 tap->wr_dbm_antsignal = (int8_t)rssi;
2544                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2545                 tap->wr_tsft = phy_info->system_timestamp;
2546                 switch (phy_info->rate) {
2547                 /* CCK rates. */
2548                 case  10: tap->wr_rate =   2; break;
2549                 case  20: tap->wr_rate =   4; break;
2550                 case  55: tap->wr_rate =  11; break;
2551                 case 110: tap->wr_rate =  22; break;
2552                 /* OFDM rates. */
2553                 case 0xd: tap->wr_rate =  12; break;
2554                 case 0xf: tap->wr_rate =  18; break;
2555                 case 0x5: tap->wr_rate =  24; break;
2556                 case 0x7: tap->wr_rate =  36; break;
2557                 case 0x9: tap->wr_rate =  48; break;
2558                 case 0xb: tap->wr_rate =  72; break;
2559                 case 0x1: tap->wr_rate =  96; break;
2560                 case 0x3: tap->wr_rate = 108; break;
2561                 /* Unknown rate: should not happen. */
2562                 default:  tap->wr_rate =   0;
2563                 }
2564         }
2565
2566         IWM_UNLOCK(sc);
2567         if (ni != NULL) {
2568                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2569                 ieee80211_input_mimo(ni, m, &rxs);
2570                 ieee80211_free_node(ni);
2571         } else {
2572                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2573                 ieee80211_input_mimo_all(ic, m, &rxs);
2574         }
2575         IWM_LOCK(sc);
2576 }
2577
2578 static void
2579 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2580         struct iwm_node *in)
2581 {
2582         struct ifnet *ifp = sc->sc_ifp;
2583         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2584         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2585         int failack = tx_resp->failure_frame;
2586
2587         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2588
2589         /* Update rate control statistics. */
2590         if (status != IWM_TX_STATUS_SUCCESS &&
2591             status != IWM_TX_STATUS_DIRECT_DONE) {
2592 #if defined(__DragonFly__)
2593                 ++ifp->if_oerrors;
2594 #else
2595                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2596 #endif
2597                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2598                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2599
2600         } else {
2601 #if defined(__DragonFly__)
2602                 ++ifp->if_opackets;
2603 #else
2604                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2605 #endif
2606                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2607                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2608         }
2609 }
2610
/*
 * Handle a TX-command completion notification from the firmware:
 * update rate-control stats, release the mbuf/node held by the TX
 * descriptor, and restart transmission if the ring drains below the
 * low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate completions for the same slot. */
	if (txd->done) {
		device_printf(sc->sc_dev,
		    "%s: got tx interrupt that's already been handled!\n",
		    __func__);
		return;
	}
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);
	m_freem(txd->m);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	KASSERT(txd->done == 0, ("txd not done"));
	txd->done = 1;
	KASSERT(txd->in, ("txd without node"));

	txd->m = NULL;
	txd->in = NULL;
	/* Release the node reference taken when the frame was queued. */
	ieee80211_free_node((struct ieee80211_node *)in);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/*
		 * NB: the two #ifdef branches below deliberately share
		 * the closing brace of the if() that follows.
		 */
#if defined(__DragonFly__)
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
#else
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_DRV_OACTIVE)) {
			ifp->if_flags &= ~IFF_DRV_OACTIVE;
#endif
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start_locked(ifp);
		}
	}
}
2668
2669 /*
2670  * transmit side
2671  */
2672
2673 /*
2674  * Process a "command done" firmware notification.  This is where we wakeup
2675  * processes waiting for a synchronous command completion.
2676  * from if_iwn
2677  */
2678 static void
2679 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2680 {
2681         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2682         struct iwm_tx_data *data;
2683
2684         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2685                 return; /* Not a command ack. */
2686         }
2687
2688         data = &ring->data[pkt->hdr.idx];
2689
2690         /* If the command was mapped in an mbuf, free it. */
2691         if (data->m != NULL) {
2692                 bus_dmamap_sync(ring->data_dmat, data->map,
2693                     BUS_DMASYNC_POSTWRITE);
2694                 bus_dmamap_unload(ring->data_dmat, data->map);
2695                 m_freem(data->m);
2696                 data->m = NULL;
2697         }
2698         wakeup(&ring->desc[pkt->hdr.idx]);
2699 }
2700
#if 0
/*
 * necessary only for block ack mode
 *
 * Mirror the byte count of the frame at (qid, idx) into the firmware's
 * scheduler byte-count table so that the TX scheduler can account for
 * the queued data.  Currently compiled out; block-ack is not enabled.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Scheduler entry packs the station id in the top 4 bits. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * NOTE(review): presumably this duplicates the entry past the end
	 * of the ring for hardware wraparound — confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2733
2734 /*
2735  * Take an 802.11 (non-n) rate, find the relevant rate
2736  * table entry.  return the index into in_ridx[].
2737  *
2738  * The caller then uses that index back into in_ridx
2739  * to figure out the rate index programmed /into/
2740  * the firmware for this given node.
2741  */
2742 static int
2743 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2744     uint8_t rate)
2745 {
2746         int i;
2747         uint8_t r;
2748
2749         for (i = 0; i < nitems(in->in_ridx); i++) {
2750                 r = iwm_rates[in->in_ridx[i]].rate;
2751                 if (rate == r)
2752                         return (i);
2753         }
2754         /* XXX Return the first */
2755         /* XXX TODO: have it return the /lowest/ */
2756         return (0);
2757 }
2758
2759 /*
2760  * Fill in various bit for management frames, and leave them
2761  * unfilled for data frames (firmware takes care of that).
2762  * Return the selected TX rate.
2763  */
2764 static const struct iwm_rate *
2765 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2766         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2767 {
2768         struct ieee80211com *ic = sc->sc_ic;
2769         struct ieee80211_node *ni = &in->in_ni;
2770         const struct iwm_rate *rinfo;
2771         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2772         int ridx, rate_flags;
2773
2774         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2775         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2776
2777         /*
2778          * XXX TODO: everything about the rate selection here is terrible!
2779          */
2780
2781         if (type == IEEE80211_FC0_TYPE_DATA) {
2782                 int i;
2783                 /* for data frames, use RS table */
2784                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2785                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2786                 ridx = in->in_ridx[i];
2787
2788                 /* This is the index into the programmed table */
2789                 tx->initial_rate_index = i;
2790                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2791                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2792                     "%s: start with i=%d, txrate %d\n",
2793                     __func__, i, iwm_rates[ridx].rate);
2794                 /* XXX no rate_n_flags? */
2795                 return &iwm_rates[ridx];
2796         }
2797
2798         /*
2799          * For non-data, use the lowest supported rate for the given
2800          * operational mode.
2801          *
2802          * Note: there may not be any rate control information available.
2803          * This driver currently assumes if we're transmitting data
2804          * frames, use the rate control table.  Grr.
2805          *
2806          * XXX TODO: use the configured rate for the traffic type!
2807          */
2808         if (ic->ic_curmode == IEEE80211_MODE_11A) {
2809                 /*
2810                  * XXX this assumes the mode is either 11a or not 11a;
2811                  * definitely won't work for 11n.
2812                  */
2813                 ridx = IWM_RIDX_OFDM;
2814         } else {
2815                 ridx = IWM_RIDX_CCK;
2816         }
2817
2818         rinfo = &iwm_rates[ridx];
2819
2820         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2821             __func__, ridx,
2822             rinfo->rate,
2823             !! (IWM_RIDX_IS_CCK(ridx))
2824             );
2825
2826         /* XXX TODO: hard-coded TX antenna? */
2827         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2828         if (IWM_RIDX_IS_CCK(ridx))
2829                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
2830         /* XXX hard-coded tx rate */
2831         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
2832
2833         return rinfo;
2834 }
2835
/* Size of the first transfer buffer: the leading bytes of the TX command. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission: build the per-frame TX command,
 * DMA-map the payload, fill the TFD (transfer frame descriptor) and
 * kick the TX ring.  Returns 0 on success; on failure the mbuf has
 * been freed.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	/* One TX ring per access category; take the next free slot. */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and fill the rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for large non-data unicast frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and management frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Firmware power-save frame timeout, per management subtype. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
					    segs, IWM_MAX_SCATTER - 1,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_NOWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_NOWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;
		/* Retry the mapping with the now-contiguous mbuf. */
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
						    segs, IWM_MAX_SCATTER - 1,
						    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, nsegs);

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;

	/*
	 * TB0 and TB1 carry the TX command (header + iwm_tx_cmd + copied
	 * 802.11 header + pad); the remaining TBs carry the payload.
	 */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3067
3068 static int
3069 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3070     const struct ieee80211_bpf_params *params)
3071 {
3072         struct ieee80211com *ic = ni->ni_ic;
3073         struct ifnet *ifp = ic->ic_ifp;
3074         struct iwm_softc *sc = ifp->if_softc;
3075         int error = 0;
3076
3077         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3078             "->%s begin\n", __func__);
3079
3080 #if defined(__DragonFly__)
3081         if ((ifp->if_flags & IFF_RUNNING) == 0) {
3082 #else
3083         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3084 #endif
3085                 ieee80211_free_node(ni);
3086                 m_freem(m);
3087                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3088                     "<-%s not RUNNING\n", __func__);
3089                 return (ENETDOWN);
3090         }
3091
3092         IWM_LOCK(sc);
3093         /* XXX fix this */
3094         if (params == NULL) {
3095                 error = iwm_tx(sc, m, ni, 0);
3096         } else {
3097                 error = iwm_tx(sc, m, ni, 0);
3098         }
3099         if (error != 0) {
3100                 /* NB: m is reclaimed on tx failure */
3101                 ieee80211_free_node(ni);
3102 #if defined(__DragonFly__)
3103                 ++ifp->if_oerrors;
3104 #else
3105                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3106 #endif
3107         }
3108         sc->sc_tx_timer = 5;
3109         IWM_UNLOCK(sc);
3110
3111         return (error);
3112 }
3113
3114 /*
3115  * mvm/tx.c
3116  */
3117
3118 #if 0
3119 /*
3120  * Note that there are transports that buffer frames before they reach
3121  * the firmware. This means that after flush_tx_path is called, the
3122  * queue might not be empty. The race-free way to handle this is to:
3123  * 1) set the station as draining
3124  * 2) flush the Tx path
3125  * 3) wait for the transport queues to be empty
3126  */
3127 int
3128 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3129 {
3130         struct iwm_tx_path_flush_cmd flush_cmd = {
3131                 .queues_ctl = htole32(tfd_msk),
3132                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3133         };
3134         int ret;
3135
3136         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3137             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3138             sizeof(flush_cmd), &flush_cmd);
3139         if (ret)
3140                 device_printf(sc->sc_dev,
3141                     "Flushing tx queue failed: %d\n", ret);
3142         return ret;
3143 }
3144 #endif
3145
3146 /*
3147  * BEGIN mvm/sta.c
3148  */
3149
3150 static void
3151 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
3152         struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
3153 {
3154         memset(cmd_v5, 0, sizeof(*cmd_v5));
3155
3156         cmd_v5->add_modify = cmd_v6->add_modify;
3157         cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
3158         cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
3159         IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
3160         cmd_v5->sta_id = cmd_v6->sta_id;
3161         cmd_v5->modify_mask = cmd_v6->modify_mask;
3162         cmd_v5->station_flags = cmd_v6->station_flags;
3163         cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
3164         cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
3165         cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
3166         cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
3167         cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
3168         cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
3169         cmd_v5->assoc_id = cmd_v6->assoc_id;
3170         cmd_v5->beamform_flags = cmd_v6->beamform_flags;
3171         cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
3172 }
3173
3174 static int
3175 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3176         struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3177 {
3178         struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3179
3180         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3181                 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3182                     sizeof(*cmd), cmd, status);
3183         }
3184
3185         iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3186
3187         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3188             &cmd_v5, status);
3189 }
3190
3191 /* send station add/update command to firmware */
3192 static int
3193 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3194 {
3195         struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3196         int ret;
3197         uint32_t status;
3198
3199         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3200
3201         add_sta_cmd.sta_id = IWM_STATION_ID;
3202         add_sta_cmd.mac_id_n_color
3203             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3204                 IWM_DEFAULT_COLOR));
3205         if (!update) {
3206                 add_sta_cmd.tfd_queue_msk = htole32(0xf);
3207                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3208         }
3209         add_sta_cmd.add_modify = update ? 1 : 0;
3210         add_sta_cmd.station_flags_msk
3211             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3212
3213         status = IWM_ADD_STA_SUCCESS;
3214         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3215         if (ret)
3216                 return ret;
3217
3218         switch (status) {
3219         case IWM_ADD_STA_SUCCESS:
3220                 break;
3221         default:
3222                 ret = EIO;
3223                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3224                 break;
3225         }
3226
3227         return ret;
3228 }
3229
/*
 * Add the BSS station to the firmware for the first time.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* An initial add is simply a send with update == 0. */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3241
/*
 * Push updated parameters for an already-added BSS station to the
 * firmware.
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int error;

	error = iwm_mvm_sta_send_to_fw(sc, in, 1);
	return error;
}
3247
3248 static int
3249 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3250         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3251 {
3252         struct iwm_mvm_add_sta_cmd_v6 cmd;
3253         int ret;
3254         uint32_t status;
3255
3256         memset(&cmd, 0, sizeof(cmd));
3257         cmd.sta_id = sta->sta_id;
3258         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3259
3260         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3261
3262         if (addr)
3263                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3264
3265         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3266         if (ret)
3267                 return ret;
3268
3269         switch (status) {
3270         case IWM_ADD_STA_SUCCESS:
3271                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3272                     "%s: Internal station added.\n", __func__);
3273                 return 0;
3274         default:
3275                 device_printf(sc->sc_dev,
3276                     "%s: Add internal station failed, status=0x%x\n",
3277                     __func__, status);
3278                 ret = EIO;
3279                 break;
3280         }
3281         return ret;
3282 }
3283
3284 static int
3285 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3286 {
3287         int ret;
3288
3289         sc->sc_aux_sta.sta_id = 3;
3290         sc->sc_aux_sta.tfd_queue_msk = 0;
3291
3292         ret = iwm_mvm_add_int_sta_common(sc,
3293             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3294
3295         if (ret)
3296                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3297         return ret;
3298 }
3299
3300 /*
3301  * END mvm/sta.c
3302  */
3303
3304 /*
3305  * BEGIN mvm/quota.c
3306  */
3307
3308 static int
3309 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3310 {
3311         struct iwm_time_quota_cmd cmd;
3312         int i, idx, ret, num_active_macs, quota, quota_rem;
3313         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3314         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3315         uint16_t id;
3316
3317         memset(&cmd, 0, sizeof(cmd));
3318
3319         /* currently, PHY ID == binding ID */
3320         if (in) {
3321                 id = in->in_phyctxt->id;
3322                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3323                 colors[id] = in->in_phyctxt->color;
3324
3325                 if (1)
3326                         n_ifs[id] = 1;
3327         }
3328
3329         /*
3330          * The FW's scheduling session consists of
3331          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3332          * equally between all the bindings that require quota
3333          */
3334         num_active_macs = 0;
3335         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3336                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3337                 num_active_macs += n_ifs[i];
3338         }
3339
3340         quota = 0;
3341         quota_rem = 0;
3342         if (num_active_macs) {
3343                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3344                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3345         }
3346
3347         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3348                 if (colors[i] < 0)
3349                         continue;
3350
3351                 cmd.quotas[idx].id_and_color =
3352                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3353
3354                 if (n_ifs[i] <= 0) {
3355                         cmd.quotas[idx].quota = htole32(0);
3356                         cmd.quotas[idx].max_duration = htole32(0);
3357                 } else {
3358                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3359                         cmd.quotas[idx].max_duration = htole32(0);
3360                 }
3361                 idx++;
3362         }
3363
3364         /* Give the remainder of the session to the first binding */
3365         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3366
3367         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3368             sizeof(cmd), &cmd);
3369         if (ret)
3370                 device_printf(sc->sc_dev,
3371                     "%s: Failed to send quota: %d\n", __func__, ret);
3372         return ret;
3373 }
3374
3375 /*
3376  * END mvm/quota.c
3377  */
3378
3379 /*
3380  * ieee80211 routines
3381  */
3382
3383 /*
3384  * Change to AUTH state in 80211 state machine.  Roughly matches what
3385  * Linux does in bss_info_changed().
3386  */
3387 static int
3388 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3389 {
3390         struct ieee80211_node *ni;
3391         struct iwm_node *in;
3392         struct iwm_vap *iv = IWM_VAP(vap);
3393         uint32_t duration;
3394         uint32_t min_duration;
3395         int error;
3396
3397         /*
3398          * XXX i have a feeling that the vap node is being
3399          * freed from underneath us. Grr.
3400          */
3401         ni = ieee80211_ref_node(vap->iv_bss);
3402         in = (struct iwm_node *) ni;
3403         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3404             "%s: called; vap=%p, bss ni=%p\n",
3405             __func__,
3406             vap,
3407             ni);
3408
3409         in->in_assoc = 0;
3410
3411         error = iwm_allow_mcast(vap, sc);
3412         if (error) {
3413                 device_printf(sc->sc_dev,
3414                     "%s: failed to set multicast\n", __func__);
3415                 goto out;
3416         }
3417
3418         /*
3419          * This is where it deviates from what Linux does.
3420          *
3421          * Linux iwlwifi doesn't reset the nic each time, nor does it
3422          * call ctxt_add() here.  Instead, it adds it during vap creation,
3423          * and always does does a mac_ctx_changed().
3424          *
3425          * The openbsd port doesn't attempt to do that - it reset things
3426          * at odd states and does the add here.
3427          *
3428          * So, until the state handling is fixed (ie, we never reset
3429          * the NIC except for a firmware failure, which should drag
3430          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3431          * contexts that are required), let's do a dirty hack here.
3432          */
3433         if (iv->is_uploaded) {
3434                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3435                         device_printf(sc->sc_dev,
3436                             "%s: failed to add MAC\n", __func__);
3437                         goto out;
3438                 }
3439         } else {
3440                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3441                         device_printf(sc->sc_dev,
3442                             "%s: failed to add MAC\n", __func__);
3443                         goto out;
3444                 }
3445         }
3446
3447         if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3448             in->in_ni.ni_chan, 1, 1)) != 0) {
3449                 device_printf(sc->sc_dev,
3450                     "%s: failed add phy ctxt\n", __func__);
3451                 goto out;
3452         }
3453         in->in_phyctxt = &sc->sc_phyctxt[0];
3454
3455         if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3456                 device_printf(sc->sc_dev,
3457                     "%s: binding cmd\n", __func__);
3458                 goto out;
3459         }
3460
3461         if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3462                 device_printf(sc->sc_dev,
3463                     "%s: failed to add MAC\n", __func__);
3464                 goto out;
3465         }
3466
3467         /* a bit superfluous? */
3468         while (sc->sc_auth_prot) {
3469 #if defined(__DragonFly__)
3470                 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmauth", 0);
3471 #else
3472                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
3473 #endif
3474         }
3475         sc->sc_auth_prot = 1;
3476
3477         duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
3478             200 + in->in_ni.ni_intval);
3479         min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
3480             100 + in->in_ni.ni_intval);
3481         iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
3482
3483         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3484             "%s: waiting for auth_prot\n", __func__);
3485         while (sc->sc_auth_prot != 2) {
3486                 /*
3487                  * well, meh, but if the kernel is sleeping for half a
3488                  * second, we have bigger problems
3489                  */
3490                 if (sc->sc_auth_prot == 0) {
3491                         device_printf(sc->sc_dev,
3492                             "%s: missed auth window!\n", __func__);
3493                         error = ETIMEDOUT;
3494                         goto out;
3495                 } else if (sc->sc_auth_prot == -1) {
3496                         device_printf(sc->sc_dev,
3497                             "%s: no time event, denied!\n", __func__);
3498                         sc->sc_auth_prot = 0;
3499                         error = EAUTH;
3500                         goto out;
3501                 }
3502 #if defined(__DragonFly__)
3503                 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmau2", 0);
3504 #else
3505                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
3506 #endif
3507         }
3508         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
3509         error = 0;
3510 out:
3511         ieee80211_free_node(ni);
3512         return (error);
3513 }
3514
3515 static int
3516 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3517 {
3518         struct iwm_node *in = (struct iwm_node *)vap->iv_bss;
3519         int error;
3520
3521         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3522                 device_printf(sc->sc_dev,
3523                     "%s: failed to update STA\n", __func__);
3524                 return error;
3525         }
3526
3527         in->in_assoc = 1;
3528         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3529                 device_printf(sc->sc_dev,
3530                     "%s: failed to update MAC\n", __func__);
3531                 return error;
3532         }
3533
3534         return 0;
3535 }
3536
/*
 * Tear down firmware state when leaving RUN.  Instead of unwinding the
 * contexts individually (which hangs the device, see below), the whole
 * device is reset and reinitialized.  'in' may be NULL.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Disabled: the proper teardown path, kept for reference. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): duplicate rm_sta call — remove if this path is
	 * ever re-enabled. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3594
3595 static struct ieee80211_node *
3596 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3597 {
3598         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
3599             M_INTWAIT | M_ZERO);
3600 }
3601
/*
 * Build the firmware link-quality (rate selection) command for a node
 * from its negotiated legacy rate set.  The result is stored in
 * in->in_lq; the caller is responsible for sending the IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out rather than overflow the firmware rate table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No match: in_ridx[i] stays -1 from the memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, refilling the
		 * mask when it runs out. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3702
3703 static int
3704 iwm_media_change(struct ifnet *ifp)
3705 {
3706         struct iwm_softc *sc = ifp->if_softc;
3707         int error;
3708
3709         error = ieee80211_media_change(ifp);
3710         if (error != ENETRESET)
3711                 return error;
3712
3713 #if defined(__DragonFly__)
3714         if ((ifp->if_flags & IFF_UP) &&
3715             (ifp->if_flags & IFF_RUNNING)) {
3716 #else
3717         if ((ifp->if_flags & IFF_UP) &&
3718             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3719 #endif
3720                 iwm_stop(ifp, 0);
3721                 iwm_init(sc);
3722         }
3723         return error;
3724 }
3725
3726
/*
 * net80211 state-machine hook.  Performs the firmware work for each
 * transition with the softc lock held (the net80211 lock is dropped
 * for the duration), then chains to the stock vap newstate handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_ifp->if_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	/* Swap the net80211 lock for the driver lock. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);
	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = (void *)vap->iv_bss) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Re-acquire the net80211 lock to run the stock
			 * handler for the forced INIT transition. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Enable power saving, beacon filtering, quotas, and
		 * program the rate table built by iwm_setrates(). */
		in = (struct iwm_node *)vap->iv_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the stock net80211 handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
3845
/*
 * Task-queue callback invoked when the firmware reports scan
 * completion.  If we just finished the 2GHz pass and the hardware
 * supports 5GHz, kick off a 5GHz scan; otherwise report the scan done
 * to net80211 (with the driver lock dropped across the callback).
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		/* Chain into the 5GHz pass; treat failure as scan end. */
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			device_printf(sc->sc_dev, "could not initiate scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Drop the lock: ieee80211_scan_done() re-enters us. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3879
/*
 * Bring the hardware from cold to operational: run the INIT firmware
 * for calibration, restart with the regular firmware, then configure
 * antennas, PHY contexts, the auxiliary station, power, and the TX
 * queues.  On any failure after firmware load the device is stopped.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image to obtain calibration data. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	/* Leave the device stopped so a later init starts clean. */
	iwm_stop_device(sc);
	return error;
}
3961
3962 /* Allow multicast from our BSSID. */
3963 static int
3964 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3965 {
3966         struct ieee80211_node *ni = vap->iv_bss;
3967         struct iwm_mcast_filter_cmd *cmd;
3968         size_t size;
3969         int error;
3970
3971         size = roundup(sizeof(*cmd), 4);
3972         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
3973         if (cmd == NULL)
3974                 return ENOMEM;
3975         cmd->filter_own = 1;
3976         cmd->port_id = 0;
3977         cmd->count = 0;
3978         cmd->pass_all = 1;
3979         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3980
3981         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3982             IWM_CMD_SYNC, size, cmd);
3983         kfree(cmd, M_DEVBUF);
3984
3985         return (error);
3986 }
3987
3988 /*
3989  * ifnet interfaces
3990  */
3991
/*
 * ifnet init entry point: take the driver lock and run the locked
 * initialization path.
 */
static void
iwm_init(void *arg)
{
	struct iwm_softc *sc;

	sc = arg;
	IWM_LOCK(sc);
	iwm_init_locked(sc);
	IWM_UNLOCK(sc);
}
4001
/*
 * Locked body of iwm_init(): bring the hardware up, mark the interface
 * running, and arm the watchdog.  No-op if already initialized; on
 * hardware failure the interface is stopped again.
 */
static void
iwm_init_locked(struct iwm_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
		return;
	}
	/* Bump the generation so stale completions can be detected. */
	sc->sc_generation++;
	sc->sc_flags &= ~IWM_FLAG_STOPPED;

	if ((error = iwm_init_hw(sc)) != 0) {
		kprintf("iwm_init_hw failed %d\n", error);
		iwm_stop_locked(ifp);
		return;
	}

	/*
	 * Ok, firmware loaded and we are jogging
	 */
#if defined(__DragonFly__)
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;
#else
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
#endif
	sc->sc_flags |= IWM_FLAG_HW_INITED;
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
4033
4034 /*
4035  * Dequeue packets from sendq and call send.
4036  * mostly from iwn
4037  */
4038 #if defined(__DragonFly__)
4039 static void
4040 iwm_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4041 #else
4042 static void
4043 iwm_start(struct ifnet *ifp)
4044 #endif
4045 {
4046         struct iwm_softc *sc = ifp->if_softc;
4047
4048         IWM_LOCK(sc);
4049         iwm_start_locked(ifp);
4050         IWM_UNLOCK(sc);
4051 }
4052
/*
 * Locked TX kick: drain the interface send queue into the hardware
 * until it is empty or the TX rings fill up (qfullmsk), counting
 * errors and arming the TX watchdog timer for queued frames.
 */
static void
iwm_start_locked(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;

#if defined(__DragonFly__)
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		ifq_purge(&ifp->if_snd);
	if (ifq_is_oactive(&ifp->if_snd) || (ifp->if_flags & IFF_RUNNING) == 0)
		return;
#else
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
		return;
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			/* Rings full: mark output active and stop. */
#if defined(__DragonFly__)
			ifq_set_oactive(&ifp->if_snd);
#else
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#endif
			break;
		}
		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* net80211 stashes the node reference in rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			/* TX failed: drop the node ref and count it. */
			ieee80211_free_node(ni);
#if defined(__DragonFly__)
			++ifp->if_oerrors;
#else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif
			continue;
		}

		/* Arm the TX watchdog (seconds, see iwm_watchdog). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
		}
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
4102
4103 static void
4104 iwm_stop(struct ifnet *ifp, int disable)
4105 {
4106         struct iwm_softc *sc = ifp->if_softc;
4107
4108         IWM_LOCK(sc);
4109         iwm_stop_locked(ifp);
4110         IWM_UNLOCK(sc);
4111 }
4112
static void
iwm_stop_locked(struct ifnet *ifp)
{
        struct iwm_softc *sc = ifp->if_softc;

        /* Mark the hardware as no longer initialized before teardown. */
        sc->sc_flags &= ~IWM_FLAG_HW_INITED;
        sc->sc_flags |= IWM_FLAG_STOPPED;
        /*
         * Bump the generation counter -- presumably so in-flight
         * operations notice the interface restarted; confirm against
         * the readers of sc_generation.
         */
        sc->sc_generation++;
        sc->sc_scanband = 0;
        sc->sc_auth_prot = 0;
        /* Clear the interface running/active state on either platform. */
#if defined(__DragonFly__)
        ifq_clr_oactive(&ifp->if_snd);
        ifp->if_flags &= ~IFF_RUNNING;
#else
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#endif
        /* Disarm the transmit watchdog, then shut the device down. */
        sc->sc_tx_timer = 0;
        iwm_stop_device(sc);
}
4132
4133 static void
4134 iwm_watchdog(void *arg)
4135 {
4136         struct iwm_softc *sc = arg;
4137         struct ifnet *ifp = sc->sc_ifp;
4138
4139 #if defined(__DragonFly__)
4140 #else
4141         KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
4142 #endif
4143         if (sc->sc_tx_timer > 0) {
4144                 if (--sc->sc_tx_timer == 0) {
4145                         device_printf(sc->sc_dev, "device timeout\n");
4146 #ifdef IWM_DEBUG
4147                         iwm_nic_error(sc);
4148 #endif
4149                         ifp->if_flags &= ~IFF_UP;
4150                         iwm_stop_locked(ifp);
4151 #if defined(__DragonFly__)
4152                         ++ifp->if_oerrors;
4153 #else
4154                         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4155 #endif
4156                         return;
4157                 }
4158         }
4159         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4160 }
4161
#if defined(__DragonFly__)
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data, struct ucred *cred)
#else
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
#endif
{
        struct iwm_softc *sc = ifp->if_softc;
        struct ieee80211com *ic = sc->sc_ic;
        struct ifreq *ifr = (struct ifreq *) data;
        int error = 0, startall = 0;

        switch (cmd) {
        case SIOCGIFADDR:
                /* Address queries are handled by the generic ether layer. */
                error = ether_ioctl(ifp, cmd, data);
                break;
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
                break;
        case SIOCSIFFLAGS:
                /*
                 * Bring the interface up or down to match IFF_UP.  The
                 * running-state flag lives in if_flags on DragonFly and
                 * in if_drv_flags on FreeBSD, hence the duplication.
                 */
                IWM_LOCK(sc);
#if defined(__DragonFly__)
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_flags & IFF_RUNNING)) {
                                iwm_init_locked(sc);
                                startall = 1;
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                iwm_stop_locked(ifp);
                }
#else
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                iwm_init_locked(sc);
                                startall = 1;
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                iwm_stop_locked(ifp);
                }
#endif
                IWM_UNLOCK(sc);
                /* Kick the vaps outside the driver lock after an init. */
                if (startall)
                        ieee80211_start_all(ic);

                break;
        default:
                error = EINVAL;
                break;
        }

        return error;
}
4217
4218 /*
4219  * The interrupt side of things
4220  */
4221
4222 /*
4223  * error dumping routines are from iwlwifi/mvm/utils.c
4224  */
4225
4226 /*
4227  * Note: This structure is read from the device with IO accesses,
4228  * and the reading already does the endian conversion. As it is
4229  * read with uint32_t-sized accesses, any members with a different size
4230  * need to be ordered correctly though!
4231  */
struct iwm_error_event_table {
        /*
         * This layout mirrors the firmware's in-SRAM error log record;
         * do not reorder or resize members (read in uint32_t units).
         */
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t pc;                    /* program counter */
        uint32_t blink1;                /* branch link */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t gp3;           /* GP3 timer register */
        uint32_t ucode_ver;             /* uCode version */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t isr_pref;              /* isr status register LMPM_NIC_PREF_STAT */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* indicate when the date and time of the
                                 * compilation */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed;
4278
/* Offsets/sizes within the firmware error log, in bytes. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))	/* skips the 'valid' word */
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))	/* one log element */
4281
4282 #ifdef IWM_DEBUG
/*
 * Map firmware error_id codes to human-readable names for the error
 * dump.  The final entry ("ADVANCED_SYSASSERT") is the catch-all
 * default returned by iwm_desc_lookup() when no code matches.
 *
 * The table is only consulted from this file and never modified, so
 * give it internal linkage and make it read-only.
 */
static const struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
4304
4305 static const char *
4306 iwm_desc_lookup(uint32_t num)
4307 {
4308         int i;
4309
4310         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4311                 if (advanced_lookup[i].num == num)
4312                         return advanced_lookup[i].name;
4313
4314         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4315         return advanced_lookup[i].name;
4316 }
4317
4318 /*
4319  * Support for dumping the error log seemed like a good idea ...
4320  * but it's mostly hex junk and the only sensible thing is the
4321  * hw/ucode revision (which we know anyway).  Since it's here,
4322  * I'll just leave it in, just in case e.g. the Intel guys want to
4323  * help us decipher some "ADVANCED_SYSASSERT" later.
4324  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        /* The firmware told us where its error log lives (ALIVE response). */
        base = sc->sc_uc.uc_error_event_table;
        /* Sanity-check the firmware-supplied pointer against the expected range. */
        if (base < 0x800000 || base >= 0x80C000) {
                device_printf(sc->sc_dev,
                    "Not valid error log pointer 0x%08x\n", base);
                return;
        }

        /*
         * NOTE(review): the length argument is sizeof(table) divided by
         * sizeof(uint32_t) -- presumably iwm_read_mem() counts 32-bit
         * words rather than bytes; confirm against its definition.
         */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        /* 'valid' doubles as the event count in this banner (upstream quirk). */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        /* Dump each table field with a label, error_id first with its name. */
        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
        device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
        device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4390 #endif
4391
/*
 * Sync the current RX descriptor's DMA map for reading and point the
 * given lvalue at the payload immediately following the packet header.
 * Both macros expect 'ring' and 'data' to be in scope at the use site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
do {                                                                    \
        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
        _var_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
do {                                                                    \
        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
        _ptr_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

/*
 * Advance the RX ring read index, wrapping at the ring size.
 * The stray trailing semicolon that used to live inside this macro has
 * been dropped: call sites invoke it as "ADVANCE_RXQ(sc);", and keeping
 * the semicolon in the expansion produced a double statement, which is
 * unsafe in an unbraced if/else body.
 */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
4405
4406 /*
4407  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4408  * Basic structure from if_iwn
4409  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
        uint16_t hw;

        /* Pull the firmware-updated status area in from DMA memory. */
        bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
            BUS_DMASYNC_POSTREAD);

        /*
         * closed_rb_num is the firmware's write index (12 bits); we
         * process every slot between our read index and it.
         */
        hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

        /*
         * Process responses
         */
        while (sc->rxq.cur != hw) {
                struct iwm_rx_ring *ring = &sc->rxq;
                struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
                struct iwm_rx_packet *pkt;
                struct iwm_cmd_response *cresp;
                int qid, idx;

                bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                    BUS_DMASYNC_POSTREAD);
                pkt = mtod(data->m, struct iwm_rx_packet *);

                /*
                 * Strip bit 7 from the queue id; it flags firmware-
                 * originated notifications (see the comment at the
                 * bottom of this loop).
                 */
                qid = pkt->hdr.qid & ~0x80;
                idx = pkt->hdr.idx;

                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
                    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
                    pkt->hdr.code, sc->rxq.cur, hw);

                /*
                 * randomly get these from the firmware, no idea why.
                 * they at least seem harmless, so just ignore them for now
                 */
                if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
                    || pkt->len_n_flags == htole32(0x55550000))) {
                        ADVANCE_RXQ(sc);
                        continue;
                }

                /* Dispatch on the firmware command/notification code. */
                switch (pkt->hdr.code) {
                case IWM_REPLY_RX_PHY_CMD:
                        iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
                        break;

                case IWM_REPLY_RX_MPDU_CMD:
                        iwm_mvm_rx_rx_mpdu(sc, pkt, data);
                        break;

                case IWM_TX_CMD:
                        iwm_mvm_rx_tx_cmd(sc, pkt, data);
                        break;

                case IWM_MISSED_BEACONS_NOTIFICATION: {
                        struct iwm_missed_beacons_notif *resp;
                        int missed;

                        /* XXX look at mac_id to determine interface ID */
                        struct ieee80211com *ic = sc->sc_ic;
                        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

                        SYNC_RESP_STRUCT(resp, pkt);
                        missed = le32toh(resp->consec_missed_beacons);

                        IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
                            "%s: MISSED_BEACON: mac_id=%d, "
                            "consec_since_last_rx=%d, consec=%d, num_expect=%d "
                            "num_rx=%d\n",
                            __func__,
                            le32toh(resp->mac_id),
                            le32toh(resp->consec_missed_beacons_since_last_rx),
                            le32toh(resp->consec_missed_beacons),
                            le32toh(resp->num_expected_beacons),
                            le32toh(resp->num_recvd_beacons));

                        /* Be paranoid */
                        if (vap == NULL)
                                break;

                        /* XXX no net80211 locking? */
                        if (vap->iv_state == IEEE80211_S_RUN &&
                            (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
                                if (missed > vap->iv_bmissthreshold) {
                                        /* XXX bad locking; turn into task */
                                        IWM_UNLOCK(sc);
                                        ieee80211_beacon_miss(ic);
                                        IWM_LOCK(sc);
                                }
                        }

                        break; }

                case IWM_MVM_ALIVE: {
                        /*
                         * Firmware booted: record the SRAM pointers it
                         * reports and wake the thread sleeping on sc_uc.
                         */
                        struct iwm_mvm_alive_resp *resp;
                        SYNC_RESP_STRUCT(resp, pkt);

                        sc->sc_uc.uc_error_event_table
                            = le32toh(resp->error_event_table_ptr);
                        sc->sc_uc.uc_log_event_table
                            = le32toh(resp->log_event_table_ptr);
                        sc->sched_base = le32toh(resp->scd_base_ptr);
                        sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

                        sc->sc_uc.uc_intr = 1;
                        wakeup(&sc->sc_uc);
                        break; }

                case IWM_CALIB_RES_NOTIF_PHY_DB: {
                        struct iwm_calib_res_notif_phy_db *phy_db_notif;
                        SYNC_RESP_STRUCT(phy_db_notif, pkt);

                        iwm_phy_db_set_section(sc, phy_db_notif);

                        break; }

                case IWM_STATISTICS_NOTIFICATION: {
                        /* Cache the statistics block and derive the noise floor. */
                        struct iwm_notif_statistics *stats;
                        SYNC_RESP_STRUCT(stats, pkt);
                        memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
                        sc->sc_noise = iwm_get_noise(&stats->rx.general);
                        break; }

                case IWM_NVM_ACCESS_CMD:
                        /* Copy the response out if a command issuer is waiting on it. */
                        if (sc->sc_wantresp == ((qid << 16) | idx)) {
                                bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                                    BUS_DMASYNC_POSTREAD);
                                memcpy(sc->sc_cmd_resp,
                                    pkt, sizeof(sc->sc_cmd_resp));
                        }
                        break;

                case IWM_PHY_CONFIGURATION_CMD:
                case IWM_TX_ANT_CONFIGURATION_CMD:
                case IWM_ADD_STA:
                case IWM_MAC_CONTEXT_CMD:
                case IWM_REPLY_SF_CFG_CMD:
                case IWM_POWER_TABLE_CMD:
                case IWM_PHY_CONTEXT_CMD:
                case IWM_BINDING_CONTEXT_CMD:
                case IWM_TIME_EVENT_CMD:
                case IWM_SCAN_REQUEST_CMD:
                case IWM_REPLY_BEACON_FILTERING_CMD:
                case IWM_MAC_PM_POWER_TABLE:
                case IWM_TIME_QUOTA_CMD:
                case IWM_REMOVE_STA:
                case IWM_TXPATH_FLUSH:
                case IWM_LQ_CMD:
                        /* Generic command acknowledgements: stash for the waiter. */
                        SYNC_RESP_STRUCT(cresp, pkt);
                        if (sc->sc_wantresp == ((qid << 16) | idx)) {
                                memcpy(sc->sc_cmd_resp,
                                    pkt, sizeof(*pkt)+sizeof(*cresp));
                        }
                        break;

                /* ignore */
                case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
                        break;

                case IWM_INIT_COMPLETE_NOTIF:
                        sc->sc_init_complete = 1;
                        wakeup(&sc->sc_init_complete);
                        break;

                case IWM_SCAN_COMPLETE_NOTIFICATION: {
                        /* Defer end-of-scan processing to the taskqueue. */
                        struct iwm_scan_complete_notif *notif;
                        SYNC_RESP_STRUCT(notif, pkt);
                        taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
                        break; }

                case IWM_REPLY_ERROR: {
                        struct iwm_error_resp *resp;
                        SYNC_RESP_STRUCT(resp, pkt);

                        device_printf(sc->sc_dev,
                            "firmware error 0x%x, cmd 0x%x\n",
                            le32toh(resp->error_type),
                            resp->cmd_id);
                        break; }

                case IWM_TIME_EVENT_NOTIFICATION: {
                        /* Track auth/time-event protection state for sleepers. */
                        struct iwm_time_event_notif *notif;
                        SYNC_RESP_STRUCT(notif, pkt);

                        if (notif->status) {
                                if (le32toh(notif->action) &
                                    IWM_TE_V2_NOTIF_HOST_EVENT_START)
                                        sc->sc_auth_prot = 2;
                                else
                                        sc->sc_auth_prot = 0;
                        } else {
                                sc->sc_auth_prot = -1;
                        }
                        IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                            "%s: time event notification auth_prot=%d\n",
                                __func__, sc->sc_auth_prot);

                        wakeup(&sc->sc_auth_prot);
                        break; }

                case IWM_MCAST_FILTER_CMD:
                        break;

                default:
                        device_printf(sc->sc_dev,
                            "cmd %04x frame %d/%d %x UNHANDLED (this should "
                            "not happen)\n",
                            pkt->hdr.code, qid, idx,
                            pkt->len_n_flags);
                        panic("unhandled command");
                        break;
                }

                /*
                 * Why test bit 0x80?  The Linux driver:
                 *
                 * There is one exception:  uCode sets bit 15 when it
                 * originates the response/notification, i.e. when the
                 * response/notification is not a direct response to a
                 * command sent by the driver.  For example, uCode issues
                 * IWM_REPLY_RX when it sends a received frame to the driver;
                 * it is not a direct response to any driver command.
                 *
                 * Ok, so since when is 7 == 15?  Well, the Linux driver
                 * uses a slightly different format for pkt->hdr, and "qid"
                 * is actually the upper byte of a two-byte field.
                 */
                if (!(pkt->hdr.qid & (1 << 7))) {
                        iwm_cmd_done(sc, pkt);
                }

                ADVANCE_RXQ(sc);
        }

        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /*
         * Tell the firmware what we have processed.
         * Seems like the hardware gets upset unless we align
         * the write by 8??
         */
        hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4656
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        struct ifnet *ifp = sc->sc_ifp;
        int handled = 0;
        /*
         * NOTE(review): 'rv' is assigned below but never read (the
         * handler returns void) -- candidate for removal.
         */
        int r1, r2, rv = 0;
        int isperiodic = 0;

#if defined(__DragonFly__)
        /* Guard against a late interrupt after detach unmapped the BAR. */
        if (sc->sc_mem == NULL) {
                kprintf("iwm_intr: detached\n");
                return;
        }
#endif
        IWM_LOCK(sc);
        /* Mask all interrupts while we figure out the cause. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                /* ICT mode: causes are read from an in-memory table. */
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                /*
                 * NOTE(review): reads use htole32() rather than
                 * le32toh(); equivalent on little-endian hosts --
                 * confirm intent for big-endian.
                 */
                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        ict[sc->ict_cur] = 0;   /* consume the slot */
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                /* Non-ICT mode: read the cause registers directly. */
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;
        }

        /* Acknowledge the interrupts we are about to service. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* ignored */
        handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
                int i;
                struct ieee80211com *ic = sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

                iwm_nic_error(sc);

                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", vap->iv_state);
#endif

                /* Firmware asserted: take the interface down. */
                device_printf(sc->sc_dev, "fatal firmware error\n");
                ifp->if_flags &= ~IFF_UP;
                iwm_stop_locked(ifp);
                rv = 1;
                goto out;

        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                ifp->if_flags &= ~IFF_UP;
                iwm_stop_locked(ifp);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Wake the firmware-load thread waiting in the DMA upload. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        ifp->if_flags &= ~IFF_UP;
                        iwm_stop_locked(ifp);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                /* Drain the RX ring of notifications/responses. */
                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        /* Re-enable the interrupts masked at entry. */
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
4813
4814 /*
4815  * Autoconf glue-sniffing
4816  */
4817 #define PCI_VENDOR_INTEL                0x8086
4818 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
4819 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
4820 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
4821 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
4822 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
4823 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
4824
4825 static const struct iwm_devices {
4826         uint16_t        device;
4827         const char      *name;
4828 } iwm_devices[] = {
4829         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4830         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4831         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4832         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4833         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4834         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
4835 };
4836
4837 static int
4838 iwm_probe(device_t dev)
4839 {
4840         int i;
4841
4842         for (i = 0; i < nitems(iwm_devices); i++) {
4843                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4844                     pci_get_device(dev) == iwm_devices[i].device) {
4845                         device_set_desc(dev, iwm_devices[i].name);
4846                         return (BUS_PROBE_DEFAULT);
4847                 }
4848         }
4849
4850         return (ENXIO);
4851 }
4852
4853 static int
4854 iwm_dev_check(device_t dev)
4855 {
4856         struct iwm_softc *sc;
4857
4858         sc = device_get_softc(dev);
4859
4860         switch (pci_get_device(dev)) {
4861         case PCI_PRODUCT_INTEL_WL_3160_1:
4862         case PCI_PRODUCT_INTEL_WL_3160_2:
4863                 sc->sc_fwname = "iwm3160fw";
4864                 sc->host_interrupt_operation_mode = 1;
4865                 return (0);
4866         case PCI_PRODUCT_INTEL_WL_7260_1:
4867         case PCI_PRODUCT_INTEL_WL_7260_2:
4868                 sc->sc_fwname = "iwm7260fw";
4869                 sc->host_interrupt_operation_mode = 1;
4870                 return (0);
4871         case PCI_PRODUCT_INTEL_WL_7265_1:
4872         case PCI_PRODUCT_INTEL_WL_7265_2:
4873                 sc->sc_fwname = "iwm7265fw";
4874                 sc->host_interrupt_operation_mode = 0;
4875                 return (0);
4876         default:
4877                 device_printf(dev, "unknown adapter type\n");
4878                 return ENXIO;
4879         }
4880 }
4881
4882 static int
4883 iwm_pci_attach(device_t dev)
4884 {
4885         struct iwm_softc *sc;
4886         int count, error, rid;
4887         uint16_t reg;
4888 #if defined(__DragonFly__)
4889         int irq_flags;
4890 #endif
4891
4892         sc = device_get_softc(dev);
4893
4894         /* Clear device-specific "PCI retry timeout" register (41h). */
4895         reg = pci_read_config(dev, 0x40, sizeof(reg));
4896         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4897
4898         /* Enable bus-mastering and hardware bug workaround. */
4899         pci_enable_busmaster(dev);
4900         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4901         /* if !MSI */
4902         if (reg & PCIM_STATUS_INTxSTATE) {
4903                 reg &= ~PCIM_STATUS_INTxSTATE;
4904         }
4905         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4906
4907         rid = PCIR_BAR(0);
4908         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4909             RF_ACTIVE);
4910         if (sc->sc_mem == NULL) {
4911                 device_printf(sc->sc_dev, "can't map mem space\n");
4912                 return (ENXIO);
4913         }
4914         sc->sc_st = rman_get_bustag(sc->sc_mem);
4915         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4916
4917         /* Install interrupt handler. */
4918         count = 1;
4919         rid = 0;
4920 #if defined(__DragonFly__)
4921         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
4922         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
4923 #else
4924         if (pci_alloc_msi(dev, &count) == 0)
4925                 rid = 1;
4926         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4927             (rid != 0 ? 0 : RF_SHAREABLE));
4928 #endif
4929         if (sc->sc_irq == NULL) {
4930                 device_printf(dev, "can't map interrupt\n");
4931                         return (ENXIO);
4932         }
4933 #if defined(__DragonFly__)
4934         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
4935                                iwm_intr, sc, &sc->sc_ih,
4936                                &wlan_global_serializer);
4937 #else
4938         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4939             NULL, iwm_intr, sc, &sc->sc_ih);
4940 #endif
4941         if (sc->sc_ih == NULL) {
4942                 device_printf(dev, "can't establish interrupt");
4943 #if defined(__DragonFly__)
4944                 pci_release_msi(dev);
4945 #endif
4946                         return (ENXIO);
4947         }
4948         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4949
4950         return (0);
4951 }
4952
4953 static void
4954 iwm_pci_detach(device_t dev)
4955 {
4956         struct iwm_softc *sc = device_get_softc(dev);
4957
4958         if (sc->sc_irq != NULL) {
4959                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4960                 bus_release_resource(dev, SYS_RES_IRQ,
4961                     rman_get_rid(sc->sc_irq), sc->sc_irq);
4962                 pci_release_msi(dev);
4963 #if defined(__DragonFly__)
4964                 sc->sc_irq = NULL;
4965 #endif
4966         }
4967         if (sc->sc_mem != NULL) {
4968                 bus_release_resource(dev, SYS_RES_MEMORY,
4969                     rman_get_rid(sc->sc_mem), sc->sc_mem);
4970 #if defined(__DragonFly__)
4971                 sc->sc_mem = NULL;
4972 #endif
4973         }
4974 }
4975
4976
4977
/*
 * Device attach: initialize locks, the task queue and the watchdog
 * callout, perform PCI/DMA resource setup, allocate the firmware, ICT,
 * scheduler and TX/RX ring DMA areas, create the ifnet + net80211 com
 * structure, and finally register iwm_preinit() as an intr config hook.
 * Firmware loading and net80211 attach are deferred to iwm_preinit().
 *
 * Returns 0 on success; any failure tears down via iwm_detach_local()
 * and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc;
        struct ieee80211com *ic;
        struct ifnet *ifp;
        int error;
        int txq_i, i;

        sc = device_get_softc(dev);
        sc->sc_dev = dev;
#if defined(__DragonFly__)
        lockinit(&sc->sc_lk, "iwm_lk", 0, 0);
        callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
        mtx_init(&sc->sc_mtx, "iwm_mtx", MTX_DEF, 0);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
        sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
        error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
                                        -1, "iwm_taskq");
#else
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
                    error);
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response is awaited yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

        /*
         * We now start fiddling with the hardware
         */
        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        sc->sc_ifp = ifp = if_alloc(IFT_IEEE80211);
        if (ifp == NULL) {
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, "iwm", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = iwm_init;
        ifp->if_ioctl = iwm_ioctl;
        ifp->if_start = iwm_start;
#if defined(__DragonFly__)
        ifp->if_nmbjclusters = IWM_RX_RING_COUNT;
        ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
#else
        IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_READY(&ifp->if_snd);
#endif

        /*
         * Set it here so we can initialise net80211.
         * But, if we fail before we call net80211_ifattach(),
         * we can't just call iwm_detach() or it'll free
         * net80211 without it having been setup.
         */
        sc->sc_ic = ic = ifp->if_l2com;
        ic->ic_ifp = ifp;
#if defined(__DragonFly__)
#else
        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
#endif
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Reset all PHY contexts to an unused state. */
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

        /*
         * Defer firmware load and net80211 attach to iwm_preinit(),
         * run from an intr config hook once interrupts are available.
         */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        sc->sc_preinit_hook.ich_desc = "iwm";
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
5156
5157 static int
5158 iwm_update_edca(struct ieee80211com *ic)
5159 {
5160         struct iwm_softc *sc = ic->ic_ifp->if_softc;
5161
5162         device_printf(sc->sc_dev, "%s: called\n", __func__);
5163         return (0);
5164 }
5165
/*
 * Deferred attach, run as an intr config hook registered by
 * iwm_attach(): start the hardware, run the "init" ucode once to read
 * firmware/NVM information, then attach net80211 and install the
 * driver's ic_* callbacks.  On failure the whole device is torn down
 * via iwm_detach_local().  The config hook is disestablished on both
 * success and failure paths.
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* Run the init firmware once, then stop the device again. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            IWM_UCODE_MAJOR(sc->sc_fwver),
            IWM_UCODE_MINOR(sc->sc_fwver),
            IWM_UCODE_API(sc->sc_fwver));

        /* not all hardware can do 5GHz band */
        if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        wlan_serialize_enter();
        ieee80211_ifattach(ic, sc->sc_bssid);
        wlan_serialize_exit();
        /* Install driver callbacks into the com structure. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_update_edca;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
5234
5235 /*
5236  * Attach the interface to 802.11 radiotap.
5237  */
5238 static void
5239 iwm_radiotap_attach(struct iwm_softc *sc)
5240 {
5241         struct ifnet *ifp = sc->sc_ifp;
5242         struct ieee80211com *ic = ifp->if_l2com;
5243
5244         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5245             "->%s begin\n", __func__);
5246         ieee80211_radiotap_attach(ic,
5247             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5248                 IWM_TX_RADIOTAP_PRESENT,
5249             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5250                 IWM_RX_RADIOTAP_PRESENT);
5251         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5252             "->%s end\n", __func__);
5253 }
5254
5255 static struct ieee80211vap *
5256 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5257     enum ieee80211_opmode opmode, int flags,
5258     const uint8_t bssid[IEEE80211_ADDR_LEN],
5259     const uint8_t mac[IEEE80211_ADDR_LEN])
5260 {
5261         struct iwm_vap *ivp;
5262         struct ieee80211vap *vap;
5263         uint8_t mac1[IEEE80211_ADDR_LEN];
5264
5265         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5266                 return NULL;
5267         IEEE80211_ADDR_COPY(mac1, mac);
5268         ivp = (struct iwm_vap *) kmalloc(sizeof(struct iwm_vap),
5269                                         M_80211_VAP, M_INTWAIT | M_ZERO);
5270         vap = &ivp->iv_vap;
5271         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
5272         IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
5273         vap->iv_bmissthreshold = 10;            /* override default */
5274         /* Override with driver methods. */
5275         ivp->iv_newstate = vap->iv_newstate;
5276         vap->iv_newstate = iwm_newstate;
5277
5278         ieee80211_ratectl_init(vap);
5279         /* Complete setup. */
5280         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status);
5281         ic->ic_opmode = opmode;
5282
5283         return vap;
5284 }
5285
5286 static void
5287 iwm_vap_delete(struct ieee80211vap *vap)
5288 {
5289         struct iwm_vap *ivp = IWM_VAP(vap);
5290
5291         ieee80211_ratectl_deinit(vap);
5292         ieee80211_vap_detach(vap);
5293         kfree(ivp, M_80211_VAP);
5294 }
5295
5296 static void
5297 iwm_scan_start(struct ieee80211com *ic)
5298 {
5299         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5300         struct iwm_softc *sc = ic->ic_ifp->if_softc;
5301         int error;
5302
5303         if (sc->sc_scanband)
5304                 return;
5305         IWM_LOCK(sc);
5306         error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5307         if (error) {
5308                 device_printf(sc->sc_dev, "could not initiate scan\n");
5309                 IWM_UNLOCK(sc);
5310                 wlan_serialize_enter();
5311                 ieee80211_cancel_scan(vap);
5312                 wlan_serialize_exit();
5313         } else
5314                 IWM_UNLOCK(sc);
5315 }
5316
/* net80211 scan-end callback; intentionally a no-op for this driver. */
static void
iwm_scan_end(struct ieee80211com *ic)
{
}
5321
/* Multicast filter update callback; intentionally a no-op. */
static void
iwm_update_mcast(struct ifnet *ifp)
{
}
5326
/* Channel-change callback; intentionally a no-op (firmware handles it). */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
5331
/* Per-channel scan callback; intentionally a no-op (full hw scan used). */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
5336
/* Minimum-dwell scan callback; intentionally a no-op. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
5342
5343 void
5344 iwm_init_task(void *arg1)
5345 {
5346         struct iwm_softc *sc = arg1;
5347         struct ifnet *ifp = sc->sc_ifp;
5348
5349         IWM_LOCK(sc);
5350         while (sc->sc_flags & IWM_FLAG_BUSY) {
5351 #if defined(__DragonFly__)
5352                 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5353 #else
5354                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5355 #endif
5356 }
5357         sc->sc_flags |= IWM_FLAG_BUSY;
5358         iwm_stop_locked(ifp);
5359 #if defined(__DragonFly__)
5360         if ((ifp->if_flags & IFF_UP) &&
5361             (ifp->if_flags & IFF_RUNNING))
5362 #else
5363         if ((ifp->if_flags & IFF_UP) &&
5364             (ifp->if_drv_flags & IFF_DRV_RUNNING))
5365 #endif
5366                 iwm_init(sc);
5367         sc->sc_flags &= ~IWM_FLAG_BUSY;
5368         wakeup(&sc->sc_flags);
5369         IWM_UNLOCK(sc);
5370 }
5371
5372 static int
5373 iwm_resume(device_t dev)
5374 {
5375         uint16_t reg;
5376
5377         /* Clear device-specific "PCI retry timeout" register (41h). */
5378         reg = pci_read_config(dev, 0x40, sizeof(reg));
5379         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5380         iwm_init_task(device_get_softc(dev));
5381
5382         return 0;
5383 }
5384
/*
 * Device suspend: stop the interface if it is currently running.
 * Always reports success.
 */
static int
iwm_suspend(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = sc->sc_ifp;

#if defined(__DragonFly__)
        if (ifp->if_flags & IFF_RUNNING)
#else
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#endif
                iwm_stop(ifp, 0);

        return (0);
}
5400
/*
 * Common teardown for both failed attach and full detach.  Frees the
 * task queue, stops the device, optionally detaches net80211 state
 * (do_net80211 != 0, used by iwm_detach(); attach failure paths pass 0
 * because net80211 may not be set up yet), then releases descriptor
 * rings, firmware, DMA areas, and finally the PCI resources and lock.
 *
 * Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ieee80211com *ic;
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (sc->sc_tq) {
#if defined(__DragonFly__)
                /* doesn't exist for DFly, DFly drains tasks on free */
#else
                taskqueue_drain_all(sc->sc_tq);
#endif
                taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
                sc->sc_tq = NULL;
#endif
        }
        if (ifp) {
                callout_drain(&sc->sc_watchdog_to);
                ic = sc->sc_ic;
                iwm_stop_device(sc);
                if (ic && do_net80211) {
                        wlan_serialize_enter();
                        ieee80211_ifdetach(ic);
                        wlan_serialize_exit();
                }
                if_free(ifp);
#if defined(__DragonFly__)
                sc->sc_ifp = NULL;
#endif
        }

        /* Free descriptor rings */
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_rawdata != NULL)
                iwm_fw_info_free(fw);

        /* free scheduler */
        iwm_free_sched(sc);
        if (sc->ict_dma.vaddr != NULL)
                iwm_free_ict(sc);
        if (sc->kw_dma.vaddr != NULL)
                iwm_free_kw(sc);
        if (sc->fw_dma.vaddr != NULL)
                iwm_free_fwmem(sc);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        /*
         * NOTE(review): lockuninit() is DragonFly's lock destructor but is
         * not wrapped in #if defined(__DragonFly__) like the lockinit()
         * call in iwm_attach() -- confirm whether a FreeBSD build of this
         * file would need mtx_destroy() here instead.
         */
        lockuninit(&sc->sc_lk);

        return (0);
}
5460
5461 static int
5462 iwm_detach(device_t dev)
5463 {
5464         struct iwm_softc *sc = device_get_softc(dev);
5465         int error;
5466
5467         error = iwm_detach_local(sc, 1);
5468
5469         return error;
5470 }
5471
/* newbus device methods for the iwm(4) PCI driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

/* Driver description: name, method table, softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register the driver on the pci bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);