Merge branch 'vendor/OPENSSL'
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      wlan_serialize_enter()/exit() hacks (will be removable when we
123  *                                           do the device netif removal).
124  *      header file paths (DFly allows localized path specifications).
125  *      minor header file differences.
126  *
127  * Comprehensive list of adjustments for DragonFly #ifdef'd:
128  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
129  *      packet counters
130  *      RUNNING and OACTIVE tests
131  *      msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
132  *      mtx -> lk  (mtx functions -> lockmgr functions)
133  *      callout differences
134  *      taskqueue differences
135  *      iwm_start() and ifq differences
136  *      iwm_ioctl() differences
137  *      MSI differences
138  *      bus_setup_intr() differences
139  *      minor PCI config register naming differences
140  */
141 #include <sys/cdefs.h>
142 __FBSDID("$FreeBSD$");
143
144 #include <sys/param.h>
145 #include <sys/bus.h>
146 #include <sys/endian.h>
147 #include <sys/firmware.h>
148 #include <sys/kernel.h>
149 #include <sys/malloc.h>
150 #include <sys/mbuf.h>
151 #include <sys/mutex.h>
152 #include <sys/module.h>
153 #include <sys/proc.h>
154 #include <sys/rman.h>
155 #include <sys/socket.h>
156 #include <sys/sockio.h>
157 #include <sys/sysctl.h>
158 #include <sys/linker.h>
159
160 #include <machine/endian.h>
161
162 #include <bus/pci/pcivar.h>
163 #include <bus/pci/pcireg.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_arp.h>
170 #include <net/ethernet.h>
171 #include <net/if_dl.h>
172 #include <net/if_media.h>
173 #include <net/if_types.h>
174 #include <net/ifq_var.h>
175
176 #include <netinet/in.h>
177 #include <netinet/in_systm.h>
178 #include <netinet/if_ether.h>
179 #include <netinet/ip.h>
180
181 #include <netproto/802_11/ieee80211_var.h>
182 #include <netproto/802_11/ieee80211_regdomain.h>
183 #include <netproto/802_11/ieee80211_ratectl.h>
184 #include <netproto/802_11/ieee80211_radiotap.h>
185
186 #include "if_iwmreg.h"
187 #include "if_iwmvar.h"
188 #include "if_iwm_debug.h"
189 #include "if_iwm_util.h"
190 #include "if_iwm_binding.h"
191 #include "if_iwm_phy_db.h"
192 #include "if_iwm_mac_ctxt.h"
193 #include "if_iwm_phy_ctxt.h"
194 #include "if_iwm_time_event.h"
195 #include "if_iwm_power.h"
196 #include "if_iwm_scan.h"
197 #include "if_iwm_pcie_trans.h"
198 #include "if_iwm_led.h"
199
/*
 * Fixed table of channel numbers the NVM parsing code knows about:
 * the 14 2.4 GHz channels first, then the 5 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of the 2.4 GHz entries at the head of iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS   14
209
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 Kbps (2 == 1 Mbps ... 108 == 54 Mbps) */
	uint8_t plcp;	/* corresponding IWM_RATE_*_PLCP signal code for the firmware */
} iwm_rates[] = {
	/* CCK (legacy 11b) rates */
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	/* OFDM (11a/11g) rates */
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* index of first CCK entry (1 Mbps) */
#define IWM_RIDX_OFDM	4	/* index of first OFDM entry (6 Mbps) */
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
236
237 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
238 static int      iwm_firmware_store_section(struct iwm_softc *,
239                                            enum iwm_ucode_type,
240                                            const uint8_t *, size_t);
241 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
242 static void     iwm_fw_info_free(struct iwm_fw_info *);
243 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
244 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
245 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
246                                      bus_size_t, bus_size_t);
247 static void     iwm_dma_contig_free(struct iwm_dma_info *);
248 static int      iwm_alloc_fwmem(struct iwm_softc *);
249 static void     iwm_free_fwmem(struct iwm_softc *);
250 static int      iwm_alloc_sched(struct iwm_softc *);
251 static void     iwm_free_sched(struct iwm_softc *);
252 static int      iwm_alloc_kw(struct iwm_softc *);
253 static void     iwm_free_kw(struct iwm_softc *);
254 static int      iwm_alloc_ict(struct iwm_softc *);
255 static void     iwm_free_ict(struct iwm_softc *);
256 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
257 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
258 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
259 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
260                                   int);
261 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
262 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
263 static void     iwm_enable_interrupts(struct iwm_softc *);
264 static void     iwm_restore_interrupts(struct iwm_softc *);
265 static void     iwm_disable_interrupts(struct iwm_softc *);
266 static void     iwm_ict_reset(struct iwm_softc *);
267 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
268 static void     iwm_stop_device(struct iwm_softc *);
269 static void     iwm_mvm_nic_config(struct iwm_softc *);
270 static int      iwm_nic_rx_init(struct iwm_softc *);
271 static int      iwm_nic_tx_init(struct iwm_softc *);
272 static int      iwm_nic_init(struct iwm_softc *);
273 static void     iwm_enable_txq(struct iwm_softc *, int, int);
274 static int      iwm_post_alive(struct iwm_softc *);
275 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
276                                    uint16_t, uint8_t *, uint16_t *);
277 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
278                                      uint16_t *);
279 static void     iwm_init_channel_map(struct iwm_softc *,
280                                      const uint16_t * const);
281 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282                                    const uint16_t *, const uint16_t *, uint8_t,
283                                    uint8_t);
284 struct iwm_nvm_section;
285 static int      iwm_parse_nvm_sections(struct iwm_softc *,
286                                        struct iwm_nvm_section *);
287 static int      iwm_nvm_init(struct iwm_softc *);
288 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
289                                         const uint8_t *, uint32_t);
290 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
291 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
292 static int      iwm_fw_alive(struct iwm_softc *, uint32_t);
293 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
294 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
295 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
296                                               enum iwm_ucode_type);
297 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
298 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
299 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
300 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
301                                             struct iwm_rx_phy_info *);
302 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
303                                       struct iwm_rx_packet *,
304                                       struct iwm_rx_data *);
305 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
306 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
307                                    struct iwm_rx_data *);
308 static void     iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
309                                          struct iwm_rx_packet *,
310                                          struct iwm_node *);
311 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
312                                   struct iwm_rx_data *);
313 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 #if 0
315 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
316                                  uint16_t);
317 #endif
318 static const struct iwm_rate *
319         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
320                         struct ieee80211_frame *, struct iwm_tx_cmd *);
321 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
322                        struct ieee80211_node *, int);
323 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
324                              const struct ieee80211_bpf_params *);
325 static void     iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
326                                              struct iwm_mvm_add_sta_cmd_v5 *);
327 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
328                                                 struct iwm_mvm_add_sta_cmd_v6 *,
329                                                 int *);
330 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331                                        int);
332 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
333 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
334 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
335                                            struct iwm_int_sta *,
336                                            const uint8_t *, uint16_t, uint16_t);
337 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
338 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
339 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
340 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
341 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
342 static struct ieee80211_node *
343                 iwm_node_alloc(struct ieee80211vap *,
344                                const uint8_t[IEEE80211_ADDR_LEN]);
345 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
346 static int      iwm_media_change(struct ifnet *);
347 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
348 static void     iwm_endscan_cb(void *, int);
349 static int      iwm_init_hw(struct iwm_softc *);
350 static void     iwm_init(void *);
351 static void     iwm_init_locked(struct iwm_softc *);
352 #if defined(__DragonFly__)
353 static void     iwm_start(struct ifnet *,  struct ifaltq_subque *);
354 #else
355 static void     iwm_start(struct ifnet *);
356 #endif
357 static void     iwm_start_locked(struct ifnet *);
358 static void     iwm_stop(struct ifnet *, int);
359 static void     iwm_stop_locked(struct ifnet *);
360 static void     iwm_watchdog(void *);
361 #if defined(__DragonFly__)
362 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t, struct ucred *cred);
363 #else
364 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
365 #endif
366 #ifdef IWM_DEBUG
367 static const char *
368                 iwm_desc_lookup(uint32_t);
369 static void     iwm_nic_error(struct iwm_softc *);
370 #endif
371 static void     iwm_notif_intr(struct iwm_softc *);
372 static void     iwm_intr(void *);
373 static int      iwm_attach(device_t);
374 static void     iwm_preinit(void *);
375 static int      iwm_detach_local(struct iwm_softc *sc, int);
376 static void     iwm_init_task(void *);
377 static void     iwm_radiotap_attach(struct iwm_softc *);
378 static struct ieee80211vap *
379                 iwm_vap_create(struct ieee80211com *,
380                                const char [IFNAMSIZ], int,
381                                enum ieee80211_opmode, int,
382                                const uint8_t [IEEE80211_ADDR_LEN],
383                                const uint8_t [IEEE80211_ADDR_LEN]);
384 static void     iwm_vap_delete(struct ieee80211vap *);
385 static void     iwm_scan_start(struct ieee80211com *);
386 static void     iwm_scan_end(struct ieee80211com *);
387 static void     iwm_update_mcast(struct ifnet *);
388 static void     iwm_set_channel(struct ieee80211com *);
389 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
390 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
391 static int      iwm_detach(device_t);
392
#if defined(__DragonFly__)
/* Tunable: set hw.iwm.msi.enable=0 to disable MSI interrupt allocation. */
static int      iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

/*
 * This is a hack due to the wlan_serializer deadlocking sleepers.
 */
int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);

/*
 * Sleep on 'chan' with lock 'lk', working around the WLAN serializer.
 *
 * If the calling thread holds the WLAN serializer, it is dropped for
 * the duration of the sleep so other WLAN work can proceed (otherwise
 * sleepers holding it can deadlock the stack).  On wakeup the lock
 * ordering is serializer-before-lk: lksleep() returns with lk held,
 * so lk is released, the serializer re-entered, and lk re-acquired
 * exclusively before returning to the caller.
 *
 * Returns the lksleep() error code (0 on normal wakeup).
 */
int
iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
{
        int error;

        if (wlan_is_serialized()) {
                wlan_serialize_exit();
                error = lksleep(chan, lk, flags, wmesg, to);
                /* drop lk before re-entering the serializer (lock order) */
                lockmgr(lk, LK_RELEASE);
                wlan_serialize_enter();
                lockmgr(lk, LK_EXCLUSIVE);
        } else {
                error = lksleep(chan, lk, flags, wmesg, to);
        }
        return error;
}

#endif
421
422 /*
423  * Firmware parser.
424  */
425
426 static int
427 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
428 {
429         const struct iwm_fw_cscheme_list *l = (const void *)data;
430
431         if (dlen < sizeof(*l) ||
432             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
433                 return EINVAL;
434
435         /* we don't actually store anything for now, always use s/w crypto */
436
437         return 0;
438 }
439
440 static int
441 iwm_firmware_store_section(struct iwm_softc *sc,
442     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
443 {
444         struct iwm_fw_sects *fws;
445         struct iwm_fw_onesect *fwone;
446
447         if (type >= IWM_UCODE_TYPE_MAX)
448                 return EINVAL;
449         if (dlen < sizeof(uint32_t))
450                 return EINVAL;
451
452         fws = &sc->sc_fw.fw_sects[type];
453         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
454                 return EINVAL;
455
456         fwone = &fws->fw_sect[fws->fw_count];
457
458         /* first 32bit are device load offset */
459         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
460
461         /* rest is data */
462         fwone->fws_data = data + sizeof(uint32_t);
463         fwone->fws_len = dlen - sizeof(uint32_t);
464
465         fws->fw_count++;
466         fws->fw_totlen += fwone->fws_len;
467
468         return 0;
469 }
470
/*
 * On-the-wire layout of the IWM_UCODE_TLV_DEF_CALIB firmware TLV:
 * the ucode image type it applies to (little-endian, see le32toh()
 * in iwm_set_default_calib()) plus the default calibration triggers.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
475
476 static int
477 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
478 {
479         const struct iwm_tlv_calib_data *def_calib = data;
480         uint32_t ucode_type = le32toh(def_calib->ucode_type);
481
482         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
483                 device_printf(sc->sc_dev,
484                     "Wrong ucode_type %u for default "
485                     "calibration.\n", ucode_type);
486                 return EINVAL;
487         }
488
489         sc->sc_default_calib[ucode_type].flow_trigger =
490             def_calib->calib.flow_trigger;
491         sc->sc_default_calib[ucode_type].event_trigger =
492             def_calib->calib.event_trigger;
493
494         return 0;
495 }
496
/*
 * Release a previously loaded firmware image and clear the parsed
 * section bookkeeping.  fw_status is deliberately preserved so that
 * waiters on the IWM_FW_STATUS_* state machine stay consistent.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
505
506 static int
507 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
508 {
509         struct iwm_fw_info *fw = &sc->sc_fw;
510         const struct iwm_tlv_ucode_header *uhdr;
511         struct iwm_ucode_tlv tlv;
512         enum iwm_ucode_tlv_type tlv_type;
513         const struct firmware *fwp;
514         const uint8_t *data;
515         int error = 0;
516         size_t len;
517
518         if (fw->fw_status == IWM_FW_STATUS_DONE &&
519             ucode_type != IWM_UCODE_TYPE_INIT)
520                 return 0;
521
522         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
523 #if defined(__DragonFly__)
524                 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
525 #else
526                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
527 #endif
528         }
529         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
530
531         if (fw->fw_fp != NULL)
532                 iwm_fw_info_free(fw);
533
534         /*
535          * Load firmware into driver memory.
536          * fw_fp will be set.
537          */
538         IWM_UNLOCK(sc);
539         fwp = firmware_get(sc->sc_fwname);
540         if (fwp == NULL) {
541                 device_printf(sc->sc_dev,
542                     "could not read firmware %s (error %d)\n",
543                     sc->sc_fwname, error);
544                 IWM_LOCK(sc);
545                 goto out;
546         }
547         IWM_LOCK(sc);
548         fw->fw_fp = fwp;
549
550         /*
551          * Parse firmware contents
552          */
553
554         uhdr = fw->fw_fp->data;
555         if (*(const uint32_t *)fw->fw_fp->data != 0
556             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
557                 device_printf(sc->sc_dev, "invalid firmware %s\n",
558                     sc->sc_fwname);
559                 error = EINVAL;
560                 goto out;
561         }
562
563         sc->sc_fwver = le32toh(uhdr->ver);
564         data = uhdr->data;
565         len = fw->fw_fp->datasize - sizeof(*uhdr);
566
567         while (len >= sizeof(tlv)) {
568                 size_t tlv_len;
569                 const void *tlv_data;
570
571                 memcpy(&tlv, data, sizeof(tlv));
572                 tlv_len = le32toh(tlv.length);
573                 tlv_type = le32toh(tlv.type);
574
575                 len -= sizeof(tlv);
576                 data += sizeof(tlv);
577                 tlv_data = data;
578
579                 if (len < tlv_len) {
580                         device_printf(sc->sc_dev,
581                             "firmware too short: %zu bytes\n",
582                             len);
583                         error = EINVAL;
584                         goto parse_out;
585                 }
586
587                 switch ((int)tlv_type) {
588                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
589                         if (tlv_len < sizeof(uint32_t)) {
590                                 device_printf(sc->sc_dev,
591                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
592                                     __func__,
593                                     (int) tlv_len);
594                                 error = EINVAL;
595                                 goto parse_out;
596                         }
597                         sc->sc_capa_max_probe_len
598                             = le32toh(*(const uint32_t *)tlv_data);
599                         /* limit it to something sensible */
600                         if (sc->sc_capa_max_probe_len > (1<<16)) {
601                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
602                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
603                                     "ridiculous\n", __func__);
604                                 error = EINVAL;
605                                 goto parse_out;
606                         }
607                         break;
608                 case IWM_UCODE_TLV_PAN:
609                         if (tlv_len) {
610                                 device_printf(sc->sc_dev,
611                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
612                                     __func__,
613                                     (int) tlv_len);
614                                 error = EINVAL;
615                                 goto parse_out;
616                         }
617                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
618                         break;
619                 case IWM_UCODE_TLV_FLAGS:
620                         if (tlv_len < sizeof(uint32_t)) {
621                                 device_printf(sc->sc_dev,
622                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
623                                     __func__,
624                                     (int) tlv_len);
625                                 error = EINVAL;
626                                 goto parse_out;
627                         }
628                         /*
629                          * Apparently there can be many flags, but Linux driver
630                          * parses only the first one, and so do we.
631                          *
632                          * XXX: why does this override IWM_UCODE_TLV_PAN?
633                          * Intentional or a bug?  Observations from
634                          * current firmware file:
635                          *  1) TLV_PAN is parsed first
636                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
637                          * ==> this resets TLV_PAN to itself... hnnnk
638                          */
639                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
640                         break;
641                 case IWM_UCODE_TLV_CSCHEME:
642                         if ((error = iwm_store_cscheme(sc,
643                             tlv_data, tlv_len)) != 0) {
644                                 device_printf(sc->sc_dev,
645                                     "%s: iwm_store_cscheme(): returned %d\n",
646                                     __func__,
647                                     error);
648                                 goto parse_out;
649                         }
650                         break;
651                 case IWM_UCODE_TLV_NUM_OF_CPU:
652                         if (tlv_len != sizeof(uint32_t)) {
653                                 device_printf(sc->sc_dev,
654                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
655                                     __func__,
656                                     (int) tlv_len);
657                                 error = EINVAL;
658                                 goto parse_out;
659                         }
660                         if (le32toh(*(const uint32_t*)tlv_data) != 1) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: driver supports "
663                                     "only TLV_NUM_OF_CPU == 1",
664                                     __func__);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         break;
669                 case IWM_UCODE_TLV_SEC_RT:
670                         if ((error = iwm_firmware_store_section(sc,
671                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
674                                     __func__,
675                                     error);
676                                 goto parse_out;
677                         }
678                         break;
679                 case IWM_UCODE_TLV_SEC_INIT:
680                         if ((error = iwm_firmware_store_section(sc,
681                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
684                                     __func__,
685                                     error);
686                                 goto parse_out;
687                         }
688                         break;
689                 case IWM_UCODE_TLV_SEC_WOWLAN:
690                         if ((error = iwm_firmware_store_section(sc,
691                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
692                                 device_printf(sc->sc_dev,
693                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
694                                     __func__,
695                                     error);
696                                 goto parse_out;
697                         }
698                         break;
699                 case IWM_UCODE_TLV_DEF_CALIB:
700                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
701                                 device_printf(sc->sc_dev,
702                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
703                                     __func__,
704                                     (int) tlv_len,
705                                     (int) sizeof(struct iwm_tlv_calib_data));
706                                 error = EINVAL;
707                                 goto parse_out;
708                         }
709                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
710                                 device_printf(sc->sc_dev,
711                                     "%s: iwm_set_default_calib() failed: %d\n",
712                                     __func__,
713                                     error);
714                                 goto parse_out;
715                         }
716                         break;
717                 case IWM_UCODE_TLV_PHY_SKU:
718                         if (tlv_len != sizeof(uint32_t)) {
719                                 error = EINVAL;
720                                 device_printf(sc->sc_dev,
721                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
722                                     __func__,
723                                     (int) tlv_len);
724                                 goto parse_out;
725                         }
726                         sc->sc_fw_phy_config =
727                             le32toh(*(const uint32_t *)tlv_data);
728                         break;
729
730                 case IWM_UCODE_TLV_API_CHANGES_SET:
731                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
732                         /* ignore, not used by current driver */
733                         break;
734
735                 default:
736                         device_printf(sc->sc_dev,
737                             "%s: unknown firmware section %d, abort\n",
738                             __func__, tlv_type);
739                         error = EINVAL;
740                         goto parse_out;
741                 }
742
743                 len -= roundup(tlv_len, 4);
744                 data += roundup(tlv_len, 4);
745         }
746
747         KASSERT(error == 0, ("unhandled error"));
748
749  parse_out:
750         if (error) {
751                 device_printf(sc->sc_dev, "firmware parse error %d, "
752                     "section type %d\n", error, tlv_type);
753         }
754
755         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
756                 device_printf(sc->sc_dev,
757                     "device uses unsupported power ops\n");
758                 error = ENOTSUP;
759         }
760
761  out:
762         if (error) {
763                 fw->fw_status = IWM_FW_STATUS_NONE;
764                 if (fw->fw_fp != NULL)
765                         iwm_fw_info_free(fw);
766         } else
767                 fw->fw_status = IWM_FW_STATUS_DONE;
768         wakeup(&sc->sc_fw);
769
770         return error;
771 }
772
773 /*
774  * DMA resource routines
775  */
776
777 static void
778 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
779 {
780         if (error != 0)
781                 return;
782         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
783         *(bus_addr_t *)arg = segs[0].ds_addr;
784 }
785
/*
 * Allocate and map a physically contiguous DMA buffer of 'size' bytes
 * with the given alignment, restricted to the low 32-bit address space.
 *
 * On success returns 0 with dma->tag/map/vaddr/paddr describing the
 * mapping; the memory is zero-filled and pre-synced for device access.
 * On failure, any partially created state is released through
 * iwm_dma_contig_free() and the bus_dma error code is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

#if defined(__DragonFly__)
	/* DragonFly's bus_dma_tag_create() takes no lockfunc arguments. */
	error = bus_dma_tag_create(tag, alignment,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, size,
				   BUS_DMA_NOWAIT, &dma->tag);
#else
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
#endif
	if (error != 0)
		goto fail;

	/* One segment, zeroed, coherent. */
	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwm_dma_map_addr() deposits the physical address in dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
830
/*
 * Release everything iwm_dma_contig_alloc() set up.  Safe on a
 * partially initialized iwm_dma_info: each teardown step is guarded by
 * a NULL check and the handles are cleared afterwards, so calling this
 * again (or on a failed allocation) is harmless.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			/* Complete any in-flight DMA before unmapping. */
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
851
852 /* fwmem is used to load firmware onto the card */
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
860
/* Release the firmware-load DMA buffer set up by iwm_alloc_fwmem(). */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
866
867 /* tx scheduler rings.  not used? */
868 static int
869 iwm_alloc_sched(struct iwm_softc *sc)
870 {
871         int rv;
872
873         /* TX scheduler rings must be aligned on a 1KB boundary. */
874         rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
875             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
876         return rv;
877 }
878
/* Release the TX scheduler DMA region set up by iwm_alloc_sched(). */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
884
885 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
891
/* Release the keep-warm page set up by iwm_alloc_kw(). */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
897
898 /* interrupt cause table */
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the PADDR shift used in iwm_ict_reset(). */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
905
/* Release the interrupt cause table set up by iwm_alloc_ict(). */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
911
/*
 * Allocate the shared RX descriptor ring, the RX status area, a DMA tag
 * for RX buffers and one buffer per ring slot (via iwm_rx_addbuf()).
 *
 * Returns 0 on success.  On any failure the partially built ring is
 * torn down with iwm_free_rx_ring() and the error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* One 32-bit word per slot; see iwm_nic_rx_init() for how the
	 * ring base is programmed into the hardware. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
975
/*
 * Stop RX DMA (when the NIC can be locked) and reset both the driver's
 * ring index and the hardware's copy of it in the shared status area.
 *
 * NOTE(review): this clears sc->rxq.stat rather than ring->stat; the
 * driver only ever passes &sc->rxq here, so they are the same object —
 * confirm if additional RX rings are ever added.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
994
/*
 * Free all RX ring resources: descriptor ring, status area, per-slot
 * mbufs and DMA maps, and finally the RX buffer DMA tag.  Each step is
 * NULL-guarded so this is safe on a partially built ring (it is also
 * the failure path of iwm_alloc_rx_ring()).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync and unload before handing the mbuf back. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1023
/*
 * Allocate one TX ring: TFD descriptor array, and — for rings up to and
 * including the command queue — the device command buffer plus per-slot
 * DMA maps and precomputed command/scratch physical addresses.
 *
 * Returns 0 on success; on failure the partially built ring is torn
 * down with iwm_free_tx_ring() and the error is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/*
	 * NOTE(review): the DragonFly tag allows IWM_MAX_SCATTER - 2
	 * segments while the FreeBSD tag allows IWM_MAX_SCATTER - 1 —
	 * confirm this difference is intentional.
	 */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWM_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Precompute each slot's command and scratch physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1103
/*
 * Drop all queued frames from a TX ring, zero its descriptors and reset
 * the ring to an empty state.  Also clears the ring's bit in qfullmsk
 * so transmit is no longer throttled on this queue.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1128
/*
 * Free all TX ring resources: descriptor and command DMA regions,
 * per-slot mbufs and DMA maps, and the TX buffer DMA tag.  NULL-guarded
 * throughout, so safe on a partially built ring (this is also the
 * failure path of iwm_alloc_tx_ring()).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1157
1158 /*
1159  * High-level hardware frobbing routines
1160  */
1161
/* Enable the default interrupt set and remember it in sc_intmask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1168
/* Re-program the interrupt mask last saved in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1174
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1185
/*
 * Reset the Interrupt Cause Table: zero the table, point the hardware
 * at it, switch the driver into ICT mode and re-enable interrupts.
 * Interrupts are disabled for the duration of the reprogramming.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1208
1209 /*
1210  * Since this .. hard-resets things, it's time to actually
1211  * mark the first vap (if any) as having no mac context.
1212  * It's annoying, but since the driver is potentially being
1213  * stop/start'ed whilst active (thanks openbsd port!) we
1214  * have to correctly track this.
1215  */
1216 static void
1217 iwm_stop_device(struct iwm_softc *sc)
1218 {
1219         struct ieee80211com *ic = sc->sc_ic;
1220         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1221         int chnl, ntries;
1222         int qid;
1223
1224         /* tell the device to stop sending interrupts */
1225         iwm_disable_interrupts(sc);
1226
1227         /*
1228          * FreeBSD-local: mark the first vap as not-uploaded,
1229          * so the next transition through auth/assoc
1230          * will correctly populate the MAC context.
1231          */
1232         if (vap) {
1233                 struct iwm_vap *iv = IWM_VAP(vap);
1234                 iv->is_uploaded = 0;
1235         }
1236
1237         /* device going down, Stop using ICT table */
1238         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1239
1240         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1241
1242         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1243
1244         /* Stop all DMA channels. */
1245         if (iwm_nic_lock(sc)) {
1246                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1247                         IWM_WRITE(sc,
1248                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1249                         for (ntries = 0; ntries < 200; ntries++) {
1250                                 uint32_t r;
1251
1252                                 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1253                                 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1254                                     chnl))
1255                                         break;
1256                                 DELAY(20);
1257                         }
1258                 }
1259                 iwm_nic_unlock(sc);
1260         }
1261
1262         /* Stop RX ring. */
1263         iwm_reset_rx_ring(sc, &sc->rxq);
1264
1265         /* Reset all TX rings. */
1266         for (qid = 0; qid < nitems(sc->txq); qid++)
1267                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1268
1269         /*
1270          * Power-down device's busmaster DMA clocks
1271          */
1272         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1273         DELAY(5);
1274
1275         /* Make sure (redundant) we've released our request to stay awake */
1276         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1277             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1278
1279         /* Stop the device, and put it in low power state */
1280         iwm_apm_stop(sc);
1281
1282         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1283          * Clean again the interrupt here
1284          */
1285         iwm_disable_interrupts(sc);
1286         /* stop and reset the on-board processor */
1287         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
1288
1289         /*
1290          * Even if we stop the HW, we still want the RF kill
1291          * interrupt
1292          */
1293         iwm_enable_rfkill_int(sc);
1294         iwm_check_rfkill(sc);
1295 }
1296
/*
 * Program the HW interface configuration register from the hardware
 * revision and the radio configuration the firmware advertised in its
 * PHY_SKU TLV (sc_fw_phy_config), then apply the early-power-off reset
 * workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Extract the radio type/step/dash fields from the firmware's
	 * PHY config word. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1336
/*
 * Program the RX DMA engine: stop the channel, point the hardware at
 * the RX descriptor ring and status area, then re-enable RX with the
 * driver's channel-0 configuration.  Returns EBUSY if the NIC cannot
 * be locked, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1400
/*
 * Program the TX side of the DMA engine: deactivate the scheduler,
 * install the keep-warm page address and point the hardware at each
 * TX ring's descriptor array.  Returns EBUSY if the NIC cannot be
 * locked, 0 otherwise.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1432
/*
 * Bring the NIC up: APM init, power settings, interface configuration,
 * then RX and TX DMA setup, and finally enable shadow registers.
 * Returns 0 on success or the first error from RX/TX init.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1458
/*
 * Hardware TX FIFO numbers, used when binding a TX queue to a FIFO in
 * iwm_enable_txq().  Access categories are translated to these via
 * iwm_mvm_ac_to_tx_fifo[].
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,	/* note: FIFO 4 is skipped */
};
1466
/*
 * Access-category to TX FIFO mapping (presumably indexed by WME AC —
 * confirm against callers).  Note the FIFO order is the reverse of the
 * enum's declaration order.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1473
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo':
 * deactivate the queue, configure chaining/aggregation, zero the read
 * and write pointers, program the scheduler context (window size and
 * frame limit) in SRAM, then mark the queue active.
 *
 * Silently does nothing (beyond a printf) if the NIC cannot be locked;
 * the XXX comment notes this should probably return EBUSY instead.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* The command queue is never chained. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Activate the queue on its target FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1521
/*
 * Finish bring-up after the firmware's "alive" notification:
 * verify the TX scheduler SRAM base address, reset the ICT table,
 * clear the scheduler context area in SRAM, point the scheduler at
 * our DMA ring, enable the command queue and the FH DMA channels.
 *
 * Returns 0 on success, EBUSY if the NIC lock cannot be taken, or an
 * errno from the SRAM write / address check.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * The firmware must agree with the driver on where the TX
	 * scheduler's SRAM lives.  XXX message lacks a trailing newline.
	 */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	/* NULL source here presumably means "fill with zeroes" -- TODO confirm */
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate all TX FIFOs in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1579
1580 /*
1581  * NVM read access and content parsing.  We do not support
1582  * external NVM or writing NVM.
1583  * iwlwifi/mvm/nvm.c
1584  */
1585
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,	/* MAC address (IWM_HW_ADDR) */
	IWM_NVM_SECTION_TYPE_SW,	/* version, radio cfg, SKU, channels */
	IWM_NVM_SECTION_TYPE_CALIBRATION, /* xtal calibration words */
	IWM_NVM_SECTION_TYPE_PRODUCTION, /* read, but not parsed below */
};
1593
1594 /* Default NVM size to read */
1595 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1596 #define IWM_MAX_NVM_SECTION_SIZE 7000
1597
1598 #define IWM_NVM_WRITE_OPCODE 1
1599 #define IWM_NVM_READ_OPCODE 0
1600
1601 static int
1602 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1603         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1604 {
1605         offset = 0;
1606         struct iwm_nvm_access_cmd nvm_access_cmd = {
1607                 .offset = htole16(offset),
1608                 .length = htole16(length),
1609                 .type = htole16(section),
1610                 .op_code = IWM_NVM_READ_OPCODE,
1611         };
1612         struct iwm_nvm_access_resp *nvm_resp;
1613         struct iwm_rx_packet *pkt;
1614         struct iwm_host_cmd cmd = {
1615                 .id = IWM_NVM_ACCESS_CMD,
1616                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1617                     IWM_CMD_SEND_IN_RFKILL,
1618                 .data = { &nvm_access_cmd, },
1619         };
1620         int ret, bytes_read, offset_read;
1621         uint8_t *resp_data;
1622
1623         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1624
1625         ret = iwm_send_cmd(sc, &cmd);
1626         if (ret)
1627                 return ret;
1628
1629         pkt = cmd.resp_pkt;
1630         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1631                 device_printf(sc->sc_dev,
1632                     "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1633                     __func__, pkt->hdr.flags);
1634                 ret = EIO;
1635                 goto exit;
1636         }
1637
1638         /* Extract NVM response */
1639         nvm_resp = (void *)pkt->data;
1640
1641         ret = le16toh(nvm_resp->status);
1642         bytes_read = le16toh(nvm_resp->length);
1643         offset_read = le16toh(nvm_resp->offset);
1644         resp_data = nvm_resp->data;
1645         if (ret) {
1646                 device_printf(sc->sc_dev,
1647                     "%s: NVM access command failed with status %d\n",
1648                     __func__, ret);
1649                 ret = EINVAL;
1650                 goto exit;
1651         }
1652
1653         if (offset_read != offset) {
1654                 device_printf(sc->sc_dev,
1655                     "%s: NVM ACCESS response with invalid offset %d\n",
1656                     __func__, offset_read);
1657                 ret = EINVAL;
1658                 goto exit;
1659         }
1660
1661         memcpy(data + offset, resp_data, bytes_read);
1662         *len = bytes_read;
1663
1664  exit:
1665         iwm_free_resp(sc, &cmd);
1666         return ret;
1667 }
1668
1669 /*
1670  * Reads an NVM section completely.
1671  * NICs prior to 7000 family doesn't have a real NVM, but just read
1672  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1673  * by uCode, we need to manually check in this case that we don't
1674  * overflow and try to read more than the EEPROM size.
1675  * For 7000 family NICs, we supply the maximal size we can read, and
1676  * the uCode fills the response with as much data as we can,
1677  * without overflowing, so no check is needed.
1678  */
1679 static int
1680 iwm_nvm_read_section(struct iwm_softc *sc,
1681         uint16_t section, uint8_t *data, uint16_t *len)
1682 {
1683         uint16_t length, seglen;
1684         int error;
1685
1686         /* Set nvm section read length */
1687         length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1688         *len = 0;
1689
1690         /* Read the NVM until exhausted (reading less than requested) */
1691         while (seglen == length) {
1692                 error = iwm_nvm_read_chunk(sc,
1693                     section, *len, length, data, &seglen);
1694                 if (error) {
1695                         device_printf(sc->sc_dev,
1696                             "Cannot read NVM from section "
1697                             "%d offset %d, length %d\n",
1698                             section, *len, length);
1699                         return error;
1700                 }
1701                 *len += seglen;
1702         }
1703
1704         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1705             "NVM section %d read completed\n", section);
1706         return 0;
1707 }
1708
1709 /*
1710  * BEGIN IWM_NVM_PARSE
1711  */
1712
/*
 * NVM offsets (in words) definitions.  These index the uint16_t arrays
 * handed to iwm_parse_nvm_data(), i.e. they count 16-bit words, not
 * bytes.
 */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address location in the HW section */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,	/* absolute base of the SW section */
	IWM_NVM_VERSION = 0,		/* the rest are relative to the base */
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1730
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	/* 11n/11ac bits exist in the NVM, but iwm_parse_nvm_data()
	 * currently forces sku_cap_11n_enable to 0. */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1738
1739 /* radio config bits (actual values from NVM definition) */
1740 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1741 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1742 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1743 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1744 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1745 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1746
1747 #define DEFAULT_MAX_TX_POWER 16
1748
1749 /**
1750  * enum iwm_nvm_channel_flags - channel flags in NVM
1751  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1752  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1753  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1754  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1755  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1756  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1757  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1758  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1759  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1760  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),	/* checked in iwm_init_channel_map() */
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),	/* clear => passive scan only */
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1772
1773 /*
1774  * Add a channel to the net80211 channel list.
1775  *
1776  * ieee is the ieee channel number
1777  * ch_idx is channel index.
1778  * mode is the channel mode - CHAN_A, CHAN_B, CHAN_G.
1779  * ch_flags is the iwm channel flags.
1780  *
1781  * Return 0 on OK, < 0 on error.
1782  */
1783 static int
1784 iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
1785     int mode, uint16_t ch_flags)
1786 {
1787         /* XXX for now, no overflow checking! */
1788         struct ieee80211com *ic =  sc->sc_ic;
1789         int is_5ghz, flags;
1790         struct ieee80211_channel *channel;
1791
1792         channel = &ic->ic_channels[ic->ic_nchans++];
1793         channel->ic_ieee = ieee;
1794
1795         is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
1796         if (!is_5ghz) {
1797                 flags = IEEE80211_CHAN_2GHZ;
1798                 channel->ic_flags = mode;
1799         } else {
1800                 flags = IEEE80211_CHAN_5GHZ;
1801                 channel->ic_flags = mode;
1802         }
1803         channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
1804
1805         if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
1806                 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
1807         return (0);
1808 }
1809
1810 static void
1811 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
1812 {
1813         struct ieee80211com *ic =  sc->sc_ic;
1814         struct iwm_nvm_data *data = &sc->sc_nvm;
1815         int ch_idx;
1816         uint16_t ch_flags;
1817         int hw_value;
1818
1819         for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
1820                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1821
1822                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
1823                     !data->sku_cap_band_52GHz_enable)
1824                         ch_flags &= ~IWM_NVM_CHANNEL_VALID;
1825
1826                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1827                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1828                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1829                             iwm_nvm_channels[ch_idx],
1830                             ch_flags,
1831                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1832                             "5.2" : "2.4");
1833                         continue;
1834                 }
1835
1836                 hw_value = iwm_nvm_channels[ch_idx];
1837
1838                 /* 5GHz? */
1839                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
1840                         (void) iwm_init_net80211_channel(sc, hw_value,
1841                             ch_idx,
1842                             IEEE80211_CHAN_A,
1843                             ch_flags);
1844                 } else {
1845                         (void) iwm_init_net80211_channel(sc, hw_value,
1846                             ch_idx,
1847                             IEEE80211_CHAN_B,
1848                             ch_flags);
1849                         /* If it's not channel 13, also add 11g */
1850                         if (hw_value != 13)
1851                                 (void) iwm_init_net80211_channel(sc, hw_value,
1852                                     ch_idx,
1853                                     IEEE80211_CHAN_G,
1854                                     ch_flags);
1855                 }
1856
1857                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1858                     "Ch. %d Flags %x [%sGHz] - Added\n",
1859                     iwm_nvm_channels[ch_idx],
1860                     ch_flags,
1861                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1862                     "5.2" : "2.4");
1863         }
1864         ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1865 }
1866
1867 static int
1868 iwm_parse_nvm_data(struct iwm_softc *sc,
1869         const uint16_t *nvm_hw, const uint16_t *nvm_sw,
1870         const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
1871 {
1872         struct iwm_nvm_data *data = &sc->sc_nvm;
1873         uint8_t hw_addr[IEEE80211_ADDR_LEN];
1874         uint16_t radio_cfg, sku;
1875
1876         data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
1877
1878         radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
1879         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
1880         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
1881         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
1882         data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
1883         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
1884         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
1885
1886         sku = le16_to_cpup(nvm_sw + IWM_SKU);
1887         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
1888         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
1889         data->sku_cap_11n_enable = 0;
1890
1891         if (!data->valid_tx_ant || !data->valid_rx_ant) {
1892                 device_printf(sc->sc_dev,
1893                     "%s: invalid antennas (0x%x, 0x%x)\n",
1894                     __func__, data->valid_tx_ant,
1895                     data->valid_rx_ant);
1896                 return EINVAL;
1897         }
1898
1899         data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
1900
1901         data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
1902         data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
1903
1904         /* The byte order is little endian 16 bit, meaning 214365 */
1905         IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
1906         data->hw_addr[0] = hw_addr[1];
1907         data->hw_addr[1] = hw_addr[0];
1908         data->hw_addr[2] = hw_addr[3];
1909         data->hw_addr[3] = hw_addr[2];
1910         data->hw_addr[4] = hw_addr[5];
1911         data->hw_addr[5] = hw_addr[4];
1912
1913         iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
1914         data->calib_version = 255;   /* TODO:
1915                                         this value will prevent some checks from
1916                                         failing, we need to check if this
1917                                         field is still needed, and if it does,
1918                                         where is it in the NVM */
1919
1920         return 0;
1921 }
1922
1923 /*
1924  * END NVM PARSE
1925  */
1926
/*
 * In-memory copy of one NVM section, filled in by iwm_nvm_init():
 * `data` points to a kmalloc'ed (M_DEVBUF) buffer of `length` bytes.
 */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};
1931
1932 static int
1933 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1934 {
1935         const uint16_t *hw, *sw, *calib;
1936
1937         /* Checking for required sections */
1938         if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1939             !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1940                 device_printf(sc->sc_dev,
1941                     "%s: Can't parse empty NVM sections\n",
1942                     __func__);
1943                 return ENOENT;
1944         }
1945
1946         hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1947         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1948         calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1949         return iwm_parse_nvm_data(sc, hw, sw, calib,
1950             IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1951 }
1952
1953 static int
1954 iwm_nvm_init(struct iwm_softc *sc)
1955 {
1956         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1957         int i, section, error;
1958         uint16_t len;
1959         uint8_t *nvm_buffer, *temp;
1960
1961         /* Read From FW NVM */
1962         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1963             "%s: Read NVM\n",
1964             __func__);
1965
1966         /* TODO: find correct NVM max size for a section */
1967         nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1968         if (nvm_buffer == NULL)
1969                 return (ENOMEM);
1970         for (i = 0; i < nitems(nvm_to_read); i++) {
1971                 section = nvm_to_read[i];
1972                 KASSERT(section <= nitems(nvm_sections),
1973                     ("too many sections"));
1974
1975                 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1976                 if (error)
1977                         break;
1978
1979                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1980                 if (temp == NULL) {
1981                         error = ENOMEM;
1982                         break;
1983                 }
1984                 memcpy(temp, nvm_buffer, len);
1985                 nvm_sections[section].data = temp;
1986                 nvm_sections[section].length = len;
1987         }
1988         kfree(nvm_buffer, M_DEVBUF);
1989         if (error)
1990                 return error;
1991
1992         return iwm_parse_nvm_sections(sc, nvm_sections);
1993 }
1994
1995 /*
1996  * Firmware loading gunk.  This is kind of a weird hybrid between the
1997  * iwn driver and the Linux iwlwifi driver.
1998  */
1999
/*
 * DMA one firmware section into device memory at dst_addr via the FH
 * service channel, then sleep until sc_fw_chunk_done is set by the
 * completion path (sleeping up to 1 second per wakeup).
 * Returns 0 on success, EBUSY if the NIC lock is unavailable, or the
 * sleep error (e.g. timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, then program destination, source and length. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	/* One TB, index 0, marked valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	return error;
}
2050
2051 static int
2052 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2053 {
2054         struct iwm_fw_sects *fws;
2055         int error, i, w;
2056         const void *data;
2057         uint32_t dlen;
2058         uint32_t offset;
2059
2060         sc->sc_uc.uc_intr = 0;
2061
2062         fws = &sc->sc_fw.fw_sects[ucode_type];
2063         for (i = 0; i < fws->fw_count; i++) {
2064                 data = fws->fw_sect[i].fws_data;
2065                 dlen = fws->fw_sect[i].fws_len;
2066                 offset = fws->fw_sect[i].fws_devoff;
2067                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2068                     "LOAD FIRMWARE type %d offset %u len %d\n",
2069                     ucode_type, offset, dlen);
2070                 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2071                 if (error) {
2072                         device_printf(sc->sc_dev,
2073                             "%s: chunk %u of %u returned error %02d\n",
2074                             __func__, i, fws->fw_count, error);
2075                         return error;
2076                 }
2077         }
2078
2079         /* wait for the firmware to load */
2080         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2081
2082         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2083 #if defined(__DragonFly__)
2084                 error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2085 #else
2086                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2087 #endif
2088         }
2089
2090         return error;
2091 }
2092
/*
 * Initialize the NIC and boot the requested ucode image: clear pending
 * interrupts and the rfkill handshake bits (several times, defensively),
 * enable host interrupts, then load the firmware sections.
 * Returns 0 on success or an errno from NIC init / firmware load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Acknowledge any stale interrupts before touching the NIC. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2122
/*
 * Hook run once the firmware has reported alive.  The sched_base
 * argument is currently unused; iwm_post_alive() reads sc->sched_base
 * directly.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2128
2129 static int
2130 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2131 {
2132         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2133                 .valid = htole32(valid_tx_ant),
2134         };
2135
2136         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2137             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2138 }
2139
2140 static int
2141 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2142 {
2143         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2144         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2145
2146         /* Set parameters */
2147         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2148         phy_cfg_cmd.calib_control.event_trigger =
2149             sc->sc_default_calib[ucode_type].event_trigger;
2150         phy_cfg_cmd.calib_control.flow_trigger =
2151             sc->sc_default_calib[ucode_type].flow_trigger;
2152
2153         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2154             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2155         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2156             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2157 }
2158
2159 static int
2160 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2161         enum iwm_ucode_type ucode_type)
2162 {
2163         enum iwm_ucode_type old_type = sc->sc_uc_current;
2164         int error;
2165
2166         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2167                 kprintf("iwm_read_firmweare: failed %d\n",
2168                         error);
2169                 return error;
2170         }
2171
2172         sc->sc_uc_current = ucode_type;
2173         error = iwm_start_fw(sc, ucode_type);
2174         if (error) {
2175                 kprintf("iwm_start_fw: failed %d\n", error);
2176                 sc->sc_uc_current = old_type;
2177                 return error;
2178         }
2179
2180         error = iwm_fw_alive(sc, sc->sched_base);
2181         if (error) {
2182                 kprintf("iwm_fw_alive: failed %d\n", error);
2183         }
2184         return error;
2185 }
2186
2187 /*
2188  * mvm misc bits
2189  */
2190
/*
 * Boot the INIT ucode image.  With justnvm != 0 this stops after the
 * NVM has been read and parsed (also allocating the scan command
 * buffer); otherwise it configures the TX antennas and the PHY and
 * waits for the firmware's init-complete notification
 * (sc_init_complete).  Returns 0 on success or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0) {
		kprintf("iwm_mvm_load_ucode_wait_alive: failed %d\n",
			error);
		return error;
	}

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		IEEE80211_ADDR_COPY(sc->sc_bssid, sc->sc_nvm.hw_addr);

		/*
		 * Size the scan command buffer for the largest possible
		 * scan: header + max probe request + all channels.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
		    M_INTWAIT);
		if (sc->sc_scan_cmd == NULL)
			return (ENOMEM);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
		return error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		device_printf(sc->sc_dev,
		    "%s: failed to run internal calibration: %d\n",
		    __func__, error);
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
				 0, "iwminit", 2*hz);
#else
		error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
				 0, "iwminit", 2*hz);
#endif
		if (error) {
			/*
			 * XXX prints sc_init_complete (still 0 here) rather
			 * than the sleep error code -- likely meant `error'.
			 */
			kprintf("init complete failed %d\n",
				sc->sc_init_complete);
			break;
		}
	}

	return error;
}
2268
2269 /*
2270  * receive side
2271  */
2272
2273 /* (re)stock rx ring, called at init-time and at runtime */
2274 static int
2275 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2276 {
2277         struct iwm_rx_ring *ring = &sc->rxq;
2278         struct iwm_rx_data *data = &ring->data[idx];
2279         struct mbuf *m;
2280         int error;
2281         bus_addr_t paddr;
2282
2283         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2284         if (m == NULL)
2285                 return ENOBUFS;
2286
2287         if (data->m != NULL)
2288                 bus_dmamap_unload(ring->data_dmat, data->map);
2289
2290         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2291         error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2292         if (error != 0) {
2293                 device_printf(sc->sc_dev,
2294                     "%s: could not create RX buf DMA map, error %d\n",
2295                     __func__, error);
2296                 goto fail;
2297         }
2298         data->m = m;
2299         error = bus_dmamap_load(ring->data_dmat, data->map,
2300             mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2301             &paddr, BUS_DMA_NOWAIT);
2302         if (error != 0 && error != EFBIG) {
2303                 device_printf(sc->sc_dev,
2304                     "%s: can't not map mbuf, error %d\n", __func__,
2305                     error);
2306                 goto fail;
2307         }
2308         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2309
2310         /* Update RX descriptor. */
2311         KKASSERT((paddr & 255) == 0);
2312         ring->desc[idx] = htole32(paddr >> 8);
2313         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2314             BUS_DMASYNC_PREWRITE);
2315
2316         return 0;
2317 fail:
2318         return error;
2319 }
2320
2321 #define IWM_RSSI_OFFSET 50
2322 static int
2323 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2324 {
2325         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2326         uint32_t agc_a, agc_b;
2327         uint32_t val;
2328
2329         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2330         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2331         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2332
2333         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2334         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2335         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2336
2337         /*
2338          * dBm = rssi dB - agc dB - constant.
2339          * Higher AGC (higher radio gain) means lower signal.
2340          */
2341         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2342         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2343         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2344
2345         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2346             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2347             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2348
2349         return max_rssi_dbm;
2350 }
2351
2352 /*
2353  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2354  * values are reported by the fw as positive values - need to negate
2355  * to obtain their dBM.  Account for missing antennas by replacing 0
2356  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2357  */
2358 static int
2359 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2360 {
2361         int energy_a, energy_b, energy_c, max_energy;
2362         uint32_t val;
2363
2364         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2365         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2366             IWM_RX_INFO_ENERGY_ANT_A_POS;
2367         energy_a = energy_a ? -energy_a : -256;
2368         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2369             IWM_RX_INFO_ENERGY_ANT_B_POS;
2370         energy_b = energy_b ? -energy_b : -256;
2371         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2372             IWM_RX_INFO_ENERGY_ANT_C_POS;
2373         energy_c = energy_c ? -energy_c : -256;
2374         max_energy = MAX(energy_a, energy_b);
2375         max_energy = MAX(max_energy, energy_c);
2376
2377         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2378             "energy In A %d B %d C %d , and max %d\n",
2379             energy_a, energy_b, energy_c, max_energy);
2380
2381         return max_energy;
2382 }
2383
/*
 * Handle an RX PHY-info notification from the firmware.
 *
 * The firmware delivers PHY information (channel, flags, rate, energy)
 * in a notification separate from the MPDU itself; stash a copy in
 * sc_last_phy_info so the subsequent MPDU handler can consume it.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
	/* Make the DMA'd packet contents visible to the CPU before reading. */
	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
2395
2396 /*
2397  * Retrieve the average noise (in dBm) among receivers.
2398  */
2399 static int
2400 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2401 {
2402         int i, total, nbant, noise;
2403
2404         total = nbant = noise = 0;
2405         for (i = 0; i < 3; i++) {
2406                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2407                 if (noise) {
2408                         total += noise;
2409                         nbant++;
2410                 }
2411         }
2412
2413         /* There should be at least one antenna but check anyway. */
2414         return (nbant == 0) ? -127 : (total / nbant) - 107;
2415 }
2416
2417 /*
2418  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2419  *
2420  * Handles the actual data of the Rx packet from the fw
2421  */
2422 static void
2423 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2424         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2425 {
2426         struct ieee80211com *ic = sc->sc_ic;
2427         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2428         struct ieee80211_frame *wh;
2429         struct ieee80211_node *ni;
2430         struct ieee80211_rx_stats rxs;
2431         struct mbuf *m;
2432         struct iwm_rx_phy_info *phy_info;
2433         struct iwm_rx_mpdu_res_start *rx_res;
2434         uint32_t len;
2435         uint32_t rx_pkt_status;
2436         int rssi;
2437
2438         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2439
2440         phy_info = &sc->sc_last_phy_info;
2441         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2442         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2443         len = le16toh(rx_res->byte_count);
2444         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2445
2446         m = data->m;
2447         m->m_data = pkt->data + sizeof(*rx_res);
2448         m->m_pkthdr.len = m->m_len = len;
2449
2450         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2451                 device_printf(sc->sc_dev,
2452                     "dsp size out of range [0,20]: %d\n",
2453                     phy_info->cfg_phy_cnt);
2454                 return;
2455         }
2456
2457         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2458             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2459                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2460                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2461                 return; /* drop */
2462         }
2463
2464         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2465                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2466         } else {
2467                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2468         }
2469         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2470         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2471
2472         /* replenish ring for the buffer we're going to feed to the sharks */
2473         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2474                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2475                     __func__);
2476                 return;
2477         }
2478
2479         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2480
2481         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2482             "%s: phy_info: channel=%d, flags=0x%08x\n",
2483             __func__,
2484             le16toh(phy_info->channel),
2485             le16toh(phy_info->phy_flags));
2486
2487         /*
2488          * Populate an RX state struct with the provided information.
2489          */
2490         bzero(&rxs, sizeof(rxs));
2491 #if !defined(__DragonFly__)
2492         /* requires new fbsd stack */
2493         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2494 #endif
2495         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2496 #if defined(__DragonFly__)
2497         uint16_t c_freq;
2498         uint8_t c_ieee;
2499         c_ieee = le16toh(phy_info->channel);
2500         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2501                 c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_2GHZ);
2502         } else {
2503                 c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_5GHZ);
2504         }
2505 #else
2506         rxs.c_ieee = le16toh(phy_info->channel);
2507         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2508                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2509         } else {
2510                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2511         }
2512 #endif
2513         rxs.rssi = rssi - sc->sc_noise;
2514         rxs.nf = sc->sc_noise;
2515
2516         if (ieee80211_radiotap_active_vap(vap)) {
2517                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2518
2519                 tap->wr_flags = 0;
2520                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2521                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2522 #if defined(__DragonFly__)
2523                 tap->wr_chan_freq = htole16(c_freq);
2524 #else
2525                 tap->wr_chan_freq = htole16(rxs.c_freq);
2526 #endif
2527                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2528                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2529                 tap->wr_dbm_antsignal = (int8_t)rssi;
2530                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2531                 tap->wr_tsft = phy_info->system_timestamp;
2532                 switch (phy_info->rate) {
2533                 /* CCK rates. */
2534                 case  10: tap->wr_rate =   2; break;
2535                 case  20: tap->wr_rate =   4; break;
2536                 case  55: tap->wr_rate =  11; break;
2537                 case 110: tap->wr_rate =  22; break;
2538                 /* OFDM rates. */
2539                 case 0xd: tap->wr_rate =  12; break;
2540                 case 0xf: tap->wr_rate =  18; break;
2541                 case 0x5: tap->wr_rate =  24; break;
2542                 case 0x7: tap->wr_rate =  36; break;
2543                 case 0x9: tap->wr_rate =  48; break;
2544                 case 0xb: tap->wr_rate =  72; break;
2545                 case 0x1: tap->wr_rate =  96; break;
2546                 case 0x3: tap->wr_rate = 108; break;
2547                 /* Unknown rate: should not happen. */
2548                 default:  tap->wr_rate =   0;
2549                 }
2550         }
2551
2552         IWM_UNLOCK(sc);
2553         if (ni != NULL) {
2554                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2555                 ieee80211_input_mimo(ni, m, &rxs);
2556                 ieee80211_free_node(ni);
2557         } else {
2558                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2559                 ieee80211_input_mimo_all(ic, m, &rxs);
2560         }
2561         IWM_LOCK(sc);
2562 }
2563
2564 static void
2565 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2566         struct iwm_node *in)
2567 {
2568         struct ifnet *ifp = sc->sc_ifp;
2569         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2570         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2571         int failack = tx_resp->failure_frame;
2572
2573         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2574
2575         /* Update rate control statistics. */
2576         if (status != IWM_TX_STATUS_SUCCESS &&
2577             status != IWM_TX_STATUS_DIRECT_DONE) {
2578 #if defined(__DragonFly__)
2579                 ++ifp->if_oerrors;
2580 #else
2581                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2582 #endif
2583                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2584                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2585
2586         } else {
2587 #if defined(__DragonFly__)
2588                 ++ifp->if_opackets;
2589 #else
2590                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2591 #endif
2592                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2593                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2594         }
2595 }
2596
/*
 * Handle a TX command response from the firmware.
 *
 * Looks up the ring slot named in the command header, feeds the status
 * to rate control, unmaps and frees the transmitted mbuf, releases the
 * node reference, and restarts the transmit path when the ring drains
 * below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against the firmware signalling the same slot twice. */
	if (txd->done) {
		device_printf(sc->sc_dev,
		    "%s: got tx interrupt that's already been handled!\n",
		    __func__);
		return;
	}
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Firmware is alive: reset the watchdog. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);
	m_freem(txd->m);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	KASSERT(txd->done == 0, ("txd not done"));
	txd->done = 1;
	KASSERT(txd->in, ("txd without node"));

	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node((struct ieee80211_node *)in);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/*
		 * NOTE(review): the non-DragonFly branch tests and clears
		 * IFF_DRV_OACTIVE in if_flags; FreeBSD convention keeps the
		 * IFF_DRV_* bits in if_drv_flags (cf. iwm_raw_xmit below) —
		 * confirm against the target stack.
		 */
#if defined(__DragonFly__)
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
#else
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_DRV_OACTIVE)) {
			ifp->if_flags &= ~IFF_DRV_OACTIVE;
#endif
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start_locked(ifp);
		}
	}
}
2654
2655 /*
2656  * transmit side
2657  */
2658
2659 /*
2660  * Process a "command done" firmware notification.  This is where we wakeup
2661  * processes waiting for a synchronous command completion.
2662  * from if_iwn
2663  */
2664 static void
2665 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2666 {
2667         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2668         struct iwm_tx_data *data;
2669
2670         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2671                 return; /* Not a command ack. */
2672         }
2673
2674         data = &ring->data[pkt->hdr.idx];
2675
2676         /* If the command was mapped in an mbuf, free it. */
2677         if (data->m != NULL) {
2678                 bus_dmamap_sync(ring->data_dmat, data->map,
2679                     BUS_DMASYNC_POSTWRITE);
2680                 bus_dmamap_unload(ring->data_dmat, data->map);
2681                 m_freem(data->m);
2682                 data->m = NULL;
2683         }
2684         wakeup(&ring->desc[pkt->hdr.idx]);
2685 }
2686
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Mirror a TX byte count into the firmware's scheduler byte-count table
 * for the given queue/index, duplicating the entry past the ring end as
 * the hardware expects for low indices.  (Currently compiled out.)
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2719
2720 /*
2721  * Take an 802.11 (non-n) rate, find the relevant rate
2722  * table entry.  return the index into in_ridx[].
2723  *
2724  * The caller then uses that index back into in_ridx
2725  * to figure out the rate index programmed /into/
2726  * the firmware for this given node.
2727  */
2728 static int
2729 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2730     uint8_t rate)
2731 {
2732         int i;
2733         uint8_t r;
2734
2735         for (i = 0; i < nitems(in->in_ridx); i++) {
2736                 r = iwm_rates[in->in_ridx[i]].rate;
2737                 if (rate == r)
2738                         return (i);
2739         }
2740         /* XXX Return the first */
2741         /* XXX TODO: have it return the /lowest/ */
2742         return (0);
2743 }
2744
2745 /*
2746  * Fill in various bit for management frames, and leave them
2747  * unfilled for data frames (firmware takes care of that).
2748  * Return the selected TX rate.
2749  */
/*
 * Fill in the rate-related fields of a TX command and return the rate
 * table entry to use for the frame.
 *
 * Data frames defer to the firmware's rate-scaling table (only the
 * starting index is set); non-data frames get an explicit hard-coded
 * rate (lowest CCK, or lowest OFDM in 11a mode).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * XXX TODO: everything about the rate selection here is terrible!
	 */

	if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;
		/* for data frames, use RS table */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Tell the firmware to apply its own rate scaling. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
		/* XXX no rate_n_flags? */
		return &iwm_rates[ridx];
	}

	/*
	 * For non-data, use the lowest supported rate for the given
	 * operational mode.
	 *
	 * Note: there may not be any rate control information available.
	 * This driver currently assumes if we're transmitting data
	 * frames, use the rate control table.  Grr.
	 *
	 * XXX TODO: use the configured rate for the traffic type!
	 */
	if (ic->ic_curmode == IEEE80211_MODE_11A) {
		/*
		 * XXX this assumes the mode is either 11a or not 11a;
		 * definitely won't work for 11n.
		 */
		ridx = IWM_RIDX_OFDM;
	} else {
		ridx = IWM_RIDX_CCK;
	}

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	/* XXX hard-coded tx rate */
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
2821
/* Size of the first TX buffer chunk (start of the command header). */
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on TX ring 'ac'.
 *
 * Builds the firmware TX command (rate, flags, station id, copied
 * 802.11 header), DMA-maps the payload, fills the TFD descriptor
 * (TB0/TB1 cover the command+header, remaining TBs the payload), and
 * kicks the ring's write pointer.
 *
 * Returns 0 on success or errno; the mbuf is always consumed (freed on
 * failure, owned by the ring slot on success).  Caller holds the IWM lock.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames want an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): RTS/CTS protection via rtsthreshold normally
	 * applies to *data* frames; the `type !=` test below looks
	 * inverted — confirm against the upstream OpenBSD/FreeBSD driver.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error && error != EFBIG) {
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error);
		m_freem(m);
		return error;
	}
	if (error) {
		/* Too many DMA segments, linearize mbuf. */
		if (m_defrag(m, M_NOWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
						    segs, IWM_MAX_SCATTER - 2,
						    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, nsegs);

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;

	/* TB0/TB1: the TX command header plus copied 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3038
3039 static int
3040 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3041     const struct ieee80211_bpf_params *params)
3042 {
3043         struct ieee80211com *ic = ni->ni_ic;
3044         struct ifnet *ifp = ic->ic_ifp;
3045         struct iwm_softc *sc = ifp->if_softc;
3046         int error = 0;
3047
3048         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3049             "->%s begin\n", __func__);
3050
3051 #if defined(__DragonFly__)
3052         if ((ifp->if_flags & IFF_RUNNING) == 0) {
3053 #else
3054         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3055 #endif
3056                 ieee80211_free_node(ni);
3057                 m_freem(m);
3058                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3059                     "<-%s not RUNNING\n", __func__);
3060                 return (ENETDOWN);
3061         }
3062
3063         IWM_LOCK(sc);
3064         /* XXX fix this */
3065         if (params == NULL) {
3066                 error = iwm_tx(sc, m, ni, 0);
3067         } else {
3068                 error = iwm_tx(sc, m, ni, 0);
3069         }
3070         if (error != 0) {
3071                 /* NB: m is reclaimed on tx failure */
3072                 ieee80211_free_node(ni);
3073 #if defined(__DragonFly__)
3074                 ++ifp->if_oerrors;
3075 #else
3076                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3077 #endif
3078         }
3079         sc->sc_tx_timer = 5;
3080         IWM_UNLOCK(sc);
3081
3082         return (error);
3083 }
3084
3085 /*
3086  * mvm/tx.c
3087  */
3088
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
/*
 * Ask the firmware to flush the TX FIFOs in tfd_msk, optionally waiting
 * for the command to complete.  (Currently compiled out.)
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3116
/*
 * Convert a v6 ADD_STA command to the older v5 layout, for firmware
 * that lacks the STA_KEY_CMD capability.  Copies every field the v5
 * structure carries; anything v6-only is dropped.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
3140
3141 static int
3142 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3143         struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3144 {
3145         struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3146
3147         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3148                 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3149                     sizeof(*cmd), cmd, status);
3150         }
3151
3152         iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3153
3154         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3155             &cmd_v5, status);
3156 }
3157
3158 /* send station add/update command to firmware */
3159 static int
3160 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3161 {
3162         struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3163         int ret;
3164         uint32_t status;
3165
3166         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3167
3168         add_sta_cmd.sta_id = IWM_STATION_ID;
3169         add_sta_cmd.mac_id_n_color
3170             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3171                 IWM_DEFAULT_COLOR));
3172         if (!update) {
3173                 add_sta_cmd.tfd_queue_msk = htole32(0xf);
3174                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3175         }
3176         add_sta_cmd.add_modify = update ? 1 : 0;
3177         add_sta_cmd.station_flags_msk
3178             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3179
3180         status = IWM_ADD_STA_SUCCESS;
3181         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3182         if (ret)
3183                 return ret;
3184
3185         switch (status) {
3186         case IWM_ADD_STA_SUCCESS:
3187                 break;
3188         default:
3189                 ret = EIO;
3190                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3191                 break;
3192         }
3193
3194         return ret;
3195 }
3196
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* A fresh add is a "send to firmware" without the update flag. */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3208
/* Refresh the existing firmware station entry; wrapper for the update case. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int error;

	error = iwm_mvm_sta_send_to_fw(sc, in, 1);
	return error;
}
3214
3215 static int
3216 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3217         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3218 {
3219         struct iwm_mvm_add_sta_cmd_v6 cmd;
3220         int ret;
3221         uint32_t status;
3222
3223         memset(&cmd, 0, sizeof(cmd));
3224         cmd.sta_id = sta->sta_id;
3225         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3226
3227         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3228
3229         if (addr)
3230                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3231
3232         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3233         if (ret)
3234                 return ret;
3235
3236         switch (status) {
3237         case IWM_ADD_STA_SUCCESS:
3238                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3239                     "%s: Internal station added.\n", __func__);
3240                 return 0;
3241         default:
3242                 device_printf(sc->sc_dev,
3243                     "%s: Add internal station failed, status=0x%x\n",
3244                     __func__, status);
3245                 ret = EIO;
3246                 break;
3247         }
3248         return ret;
3249 }
3250
3251 static int
3252 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3253 {
3254         int ret;
3255
3256         sc->sc_aux_sta.sta_id = 3;
3257         sc->sc_aux_sta.tfd_queue_msk = 0;
3258
3259         ret = iwm_mvm_add_int_sta_common(sc,
3260             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3261
3262         if (ret)
3263                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3264         return ret;
3265 }
3266
3267 static int
3268 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3269 {
3270         struct iwm_time_quota_cmd cmd;
3271         int i, idx, ret, num_active_macs, quota, quota_rem;
3272         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3273         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3274         uint16_t id;
3275
3276         memset(&cmd, 0, sizeof(cmd));
3277
3278         /* currently, PHY ID == binding ID */
3279         if (in) {
3280                 id = in->in_phyctxt->id;
3281                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3282                 colors[id] = in->in_phyctxt->color;
3283
3284                 if (1)
3285                         n_ifs[id] = 1;
3286         }
3287
3288         /*
3289          * The FW's scheduling session consists of
3290          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3291          * equally between all the bindings that require quota
3292          */
3293         num_active_macs = 0;
3294         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3295                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3296                 num_active_macs += n_ifs[i];
3297         }
3298
3299         quota = 0;
3300         quota_rem = 0;
3301         if (num_active_macs) {
3302                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3303                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3304         }
3305
3306         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3307                 if (colors[i] < 0)
3308                         continue;
3309
3310                 cmd.quotas[idx].id_and_color =
3311                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3312
3313                 if (n_ifs[i] <= 0) {
3314                         cmd.quotas[idx].quota = htole32(0);
3315                         cmd.quotas[idx].max_duration = htole32(0);
3316                 } else {
3317                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3318                         cmd.quotas[idx].max_duration = htole32(0);
3319                 }
3320                 idx++;
3321         }
3322
3323         /* Give the remainder of the session to the first binding */
3324         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3325
3326         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3327             sizeof(cmd), &cmd);
3328         if (ret)
3329                 device_printf(sc->sc_dev,
3330                     "%s: Failed to send quota: %d\n", __func__, ret);
3331         return ret;
3332 }
3333
3334 /*
3335  * ieee80211 routines
3336  */
3337
/*
 * Change to AUTH state in 80211 state machine.  Roughly matches what
 * Linux does in bss_info_changed().
 *
 * Called with the driver lock held (we sleep on it below).  On success,
 * PHY context 0 is bound to the BSS channel, the MAC context and BSS
 * station are pushed to firmware, and a session-protection time event
 * covering the authentication exchange is active.
 */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	uint32_t min_duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.  Hold a reference until "out".
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = (struct iwm_node *) ni;
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	in->in_assoc = 0;

	/* Let multicast frames from our BSSID through the FW filter. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	} else {
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}

	/* Point PHY context 0 at the BSS channel and remember the binding. */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed add phy ctxt\n", __func__);
		goto out;
	}
	in->in_phyctxt = &sc->sc_phyctxt[0];

	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding cmd\n", __func__);
		goto out;
	}

	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add MAC\n", __func__);
		goto out;
	}

	/*
	 * Wait for any in-flight session-protection handshake to drain
	 * (sc_auth_prot == 0), then claim it by setting it to 1.
	 * a bit superfluous?
	 */
	while (sc->sc_auth_prot) {
#if defined(__DragonFly__)
		iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmauth", 0);
#else
		msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
#endif
	}
	sc->sc_auth_prot = 1;

	/* Protection window scales with the beacon interval, clamped. */
	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
	    200 + in->in_ni.ni_intval);
	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
	    100 + in->in_ni.ni_intval);
	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);

	/*
	 * sc_auth_prot values observed here (presumably written by the
	 * time-event notification handler — TODO confirm against that code):
	 *   2  -> time event active, proceed
	 *   0  -> protection window came and went without us
	 *   -1 -> firmware denied the time event
	 */
	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: waiting for auth_prot\n", __func__);
	while (sc->sc_auth_prot != 2) {
		/*
		 * well, meh, but if the kernel is sleeping for half a
		 * second, we have bigger problems
		 */
		if (sc->sc_auth_prot == 0) {
			device_printf(sc->sc_dev,
			    "%s: missed auth window!\n", __func__);
			error = ETIMEDOUT;
			goto out;
		} else if (sc->sc_auth_prot == -1) {
			device_printf(sc->sc_dev,
			    "%s: no time event, denied!\n", __func__);
			sc->sc_auth_prot = 0;
			error = EAUTH;
			goto out;
		}
#if defined(__DragonFly__)
		iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmau2", 0);
#else
		msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
#endif
	}
	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
	error = 0;
out:
	ieee80211_free_node(ni);
	return (error);
}
3469
3470 static int
3471 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3472 {
3473         struct iwm_node *in = (struct iwm_node *)vap->iv_bss;
3474         int error;
3475
3476         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3477                 device_printf(sc->sc_dev,
3478                     "%s: failed to update STA\n", __func__);
3479                 return error;
3480         }
3481
3482         in->in_assoc = 1;
3483         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3484                 device_printf(sc->sc_dev,
3485                     "%s: failed to update MAC\n", __func__);
3486                 return error;
3487         }
3488
3489         return 0;
3490 }
3491
/*
 * Tear down the RUN-state association by fully resetting the device.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * Dead code: the orderly teardown path, kept for reference.
	 * NOTE(review): iwm_mvm_rm_sta() is invoked twice in this path.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3549
3550 static struct ieee80211_node *
3551 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3552 {
3553         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
3554             M_INTWAIT | M_ZERO);
3555 }
3556
/*
 * Build the link-quality (rate selection) command for a node.
 *
 * Maps the node's negotiated legacy rates onto hardware rate indices
 * (in_ridx[], highest rate first) and fills in->in_lq's rs_table with
 * the corresponding PLCP/antenna/CCK words.  The caller sends the
 * resulting IWM_LQ_CMD to firmware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 *
	 * NOTE(review): if a rate failed to map above, in_ridx[i] is still
	 * -1 (from the memset) and iwm_rates[ridx] below would index out of
	 * bounds — verify all ni_rates entries always map to HW rates.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, one per retry slot. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3668
3669 static int
3670 iwm_media_change(struct ifnet *ifp)
3671 {
3672         struct iwm_softc *sc = ifp->if_softc;
3673         int error;
3674
3675         error = ieee80211_media_change(ifp);
3676         if (error != ENETRESET)
3677                 return error;
3678
3679 #if defined(__DragonFly__)
3680         if ((ifp->if_flags & IFF_UP) &&
3681             (ifp->if_flags & IFF_RUNNING)) {
3682 #else
3683         if ((ifp->if_flags & IFF_UP) &&
3684             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3685 #endif
3686                 iwm_stop(ifp, 0);
3687                 iwm_init(sc);
3688         }
3689         return error;
3690 }
3691
3692
/*
 * net80211 state-change hook.
 *
 * Entered with the net80211 lock held (we drop it and take the driver
 * lock for the hardware work, restoring it before chaining to the stock
 * iv_newstate handler saved in ivp).
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_ifp->if_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = (void *)vap->iv_bss) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for the rationale. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* Re-take the net80211 lock to run the INIT hop. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = (struct iwm_node *)vap->iv_bss;
		/* Push per-association power, beacon-filter, quota state. */
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* Build in_lq, then send it to firmware below. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
3816
3817 void
3818 iwm_endscan_cb(void *arg, int pending)
3819 {
3820         struct iwm_softc *sc = arg;
3821         struct ieee80211com *ic = sc->sc_ic;
3822         int done;
3823         int error;
3824
3825         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
3826             "%s: scan ended\n",
3827             __func__);
3828
3829         IWM_LOCK(sc);
3830         if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
3831             sc->sc_nvm.sku_cap_band_52GHz_enable) {
3832                 done = 0;
3833                 if ((error = iwm_mvm_scan_request(sc,
3834                     IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
3835                         device_printf(sc->sc_dev, "could not initiate scan\n");
3836                         done = 1;
3837                 }
3838         } else {
3839                 done = 1;
3840         }
3841
3842         if (done) {
3843                 IWM_UNLOCK(sc);
3844                 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
3845                 IWM_LOCK(sc);
3846                 sc->sc_scanband = 0;
3847         }
3848         IWM_UNLOCK(sc);
3849 }
3850
/*
 * Full firmware bring-up: run the INIT ucode image, restart the hardware,
 * load the regular runtime image, then configure antennas, PHY db, PHY
 * contexts, the aux (scan) station, device power and the TX queues.
 * On any failure after the runtime image starts loading, the device is
 * stopped again before returning.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3932
3933 /* Allow multicast from our BSSID. */
3934 static int
3935 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3936 {
3937         struct ieee80211_node *ni = vap->iv_bss;
3938         struct iwm_mcast_filter_cmd *cmd;
3939         size_t size;
3940         int error;
3941
3942         size = roundup(sizeof(*cmd), 4);
3943         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
3944         if (cmd == NULL)
3945                 return ENOMEM;
3946         cmd->filter_own = 1;
3947         cmd->port_id = 0;
3948         cmd->count = 0;
3949         cmd->pass_all = 1;
3950         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3951
3952         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3953             IWM_CMD_SYNC, size, cmd);
3954         kfree(cmd, M_DEVBUF);
3955
3956         return (error);
3957 }
3958
3959 /*
3960  * ifnet interfaces
3961  */
3962
/* Serialized entry point: take the driver lock and run the real init. */
static void
iwm_init(void *arg)
{
	struct iwm_softc *sc = arg;

	IWM_LOCK(sc);
	iwm_init_locked(sc);
	IWM_UNLOCK(sc);
}
3972
3973 static void
3974 iwm_init_locked(struct iwm_softc *sc)
3975 {
3976         struct ifnet *ifp = sc->sc_ifp;
3977         int error;
3978
3979         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3980                 return;
3981         }
3982         sc->sc_generation++;
3983         sc->sc_flags &= ~IWM_FLAG_STOPPED;
3984
3985         if ((error = iwm_init_hw(sc)) != 0) {
3986                 kprintf("iwm_init_hw failed %d\n", error);
3987                 iwm_stop_locked(ifp);
3988                 return;
3989         }
3990
3991         /*
3992          * Ok, firmware loaded and we are jogging
3993          */
3994 #if defined(__DragonFly__)
3995         ifq_clr_oactive(&ifp->if_snd);
3996         ifp->if_flags |= IFF_RUNNING;
3997 #else
3998         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3999         ifp->if_drv_flags |= IFF_DRV_RUNNING;
4000 #endif
4001         sc->sc_flags |= IWM_FLAG_HW_INITED;
4002         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4003 }
4004
4005 /*
4006  * Dequeue packets from sendq and call send.
4007  * mostly from iwn
4008  */
4009 #if defined(__DragonFly__)
4010 static void
4011 iwm_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4012 #else
4013 static void
4014 iwm_start(struct ifnet *ifp)
4015 #endif
4016 {
4017         struct iwm_softc *sc = ifp->if_softc;
4018
4019         IWM_LOCK(sc);
4020         iwm_start_locked(ifp);
4021         IWM_UNLOCK(sc);
4022 }
4023
/* TX dequeue loop; caller must hold the driver lock. */
static void
iwm_start_locked(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;	/* XXX everything goes out via AC 0 */

#if defined(__DragonFly__)
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		ifq_purge(&ifp->if_snd);
	if (ifq_is_oactive(&ifp->if_snd) || (ifp->if_flags & IFF_RUNNING) == 0)
		return;
#else
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
		return;
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			/* Some HW queue is full; mark the ifq busy and stop. */
#if defined(__DragonFly__)
			ifq_set_oactive(&ifp->if_snd);
#else
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#endif
			break;
		}
		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* net80211 stashes the node ref in rcvif; we own it now. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
#if defined(__DragonFly__)
			++ifp->if_oerrors;
#else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif
			continue;
		}

		/* Arm the TX watchdog while frames are in flight. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
		}
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
4073
/*
 * ifnet stop entry point: take the driver lock and stop the device.
 * The 'disable' argument is unused here; the parameter is presumably
 * kept for the stop-callback signature (OpenBSD heritage) -- see the
 * if_stop registration site.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
        struct iwm_softc *sc = ifp->if_softc;

        IWM_LOCK(sc);
        iwm_stop_locked(ifp);
        IWM_UNLOCK(sc);
}
4083
/*
 * Stop the interface; caller must hold the IWM lock.
 *
 * Clears the HW-inited flag, marks the softc stopped, bumps
 * sc_generation (so code elsewhere can detect a restart across a
 * sleep), resets scan/auth-protection state, takes the ifnet out of
 * RUNNING/OACTIVE, stops the LED blink and TX watchdog, and finally
 * shuts the hardware down via iwm_stop_device().
 */
static void
iwm_stop_locked(struct ifnet *ifp)
{
        struct iwm_softc *sc = ifp->if_softc;

        sc->sc_flags &= ~IWM_FLAG_HW_INITED;
        sc->sc_flags |= IWM_FLAG_STOPPED;
        sc->sc_generation++;
        sc->sc_scanband = 0;
        sc->sc_auth_prot = 0;
#if defined(__DragonFly__)
        ifq_clr_oactive(&ifp->if_snd);
        ifp->if_flags &= ~IFF_RUNNING;
#else
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#endif
        iwm_led_blink_stop(sc);
        sc->sc_tx_timer = 0;    /* disarm the watchdog */
        iwm_stop_device(sc);
}
4104
/*
 * Periodic (1 Hz) watchdog callout.
 *
 * Counts down sc_tx_timer (armed by the TX path); when it expires the
 * device is assumed wedged: print a timeout, dump the firmware error
 * log on debug builds, mark the interface down and stop it.  The
 * callout re-arms itself every hz unless the timeout fired.
 *
 * NOTE(review): iwm_stop_locked() is called here without an explicit
 * IWM_LOCK(); presumably the callout is initialized with the driver
 * mutex (callout_init_mtx/_lk) so it runs locked -- verify at the
 * callout setup site.
 */
static void
iwm_watchdog(void *arg)
{
        struct iwm_softc *sc = arg;
        struct ifnet *ifp = sc->sc_ifp;

#if defined(__DragonFly__)
#else
        KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
#endif
        if (sc->sc_tx_timer > 0) {
                if (--sc->sc_tx_timer == 0) {
                        device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
                        iwm_nic_error(sc);
#endif
                        ifp->if_flags &= ~IFF_UP;
                        iwm_stop_locked(ifp);
#if defined(__DragonFly__)
                        ++ifp->if_oerrors;
#else
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif
                        /* do not re-arm after a fatal timeout */
                        return;
                }
        }
        callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
4133
/*
 * ifnet ioctl handler.
 *
 * Address and media queries are delegated to the generic helpers;
 * SIOCSIFFLAGS handles up/down transitions by starting or stopping the
 * device under the IWM lock.  When the device was just brought up, all
 * vaps are kicked via ieee80211_start_all() outside the driver lock.
 * Any other command returns EINVAL.
 */
#if defined(__DragonFly__)
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data, struct ucred *cred)
#else
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
#endif
{
        struct iwm_softc *sc = ifp->if_softc;
        struct ieee80211com *ic = sc->sc_ic;
        struct ifreq *ifr = (struct ifreq *) data;
        int error = 0, startall = 0;

        switch (cmd) {
        case SIOCGIFADDR:
                error = ether_ioctl(ifp, cmd, data);
                break;
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
                break;
        case SIOCSIFFLAGS:
                IWM_LOCK(sc);
#if defined(__DragonFly__)
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_flags & IFF_RUNNING)) {
                                iwm_init_locked(sc);
                                startall = 1;
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                iwm_stop_locked(ifp);
                }
#else
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                iwm_init_locked(sc);
                                startall = 1;
                        }
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                iwm_stop_locked(ifp);
                }
#endif
                IWM_UNLOCK(sc);
                /* restart vap transmit queues without holding our lock */
                if (startall)
                        ieee80211_start_all(ic);

                break;
        default:
                error = EINVAL;
                break;
        }

        return error;
}
4189
4190 /*
4191  * The interrupt side of things
4192  */
4193
4194 /*
4195  * error dumping routines are from iwlwifi/mvm/utils.c
4196  */
4197
4198 /*
4199  * Note: This structure is read from the device with IO accesses,
4200  * and the reading already does the endian conversion. As it is
4201  * read with uint32_t-sized accesses, any members with a different size
4202  * need to be ordered correctly though!
4203  */
/* Firmware error-log layout; field order must match the device image. */
struct iwm_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t pc;                    /* program counter */
        uint32_t blink1;                /* branch link */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t gp3;           /* GP3 timer register */
        uint32_t ucode_ver;             /* uCode version */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t isr_pref;              /* isr status register LMPM_NIC_PREF_STAT */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* date and time of the firmware build */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed;
4250
4251 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4252 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4253
4254 #ifdef IWM_DEBUG
/*
 * Firmware SYSASSERT error-ID -> symbolic name map consumed by
 * iwm_desc_lookup().  The final ADVANCED_SYSASSERT entry (num == 0) is
 * the catch-all returned when no ID matches.
 *
 * Made static const: the table is private to this file and never
 * written, so keep it out of the global namespace and in read-only
 * storage.
 */
static const struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
4276
4277 static const char *
4278 iwm_desc_lookup(uint32_t num)
4279 {
4280         int i;
4281
4282         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4283                 if (advanced_lookup[i].num == num)
4284                         return advanced_lookup[i].name;
4285
4286         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4287         return advanced_lookup[i].name;
4288 }
4289
4290 /*
4291  * Support for dumping the error log seemed like a good idea ...
4292  * but it's mostly hex junk and the only sensible thing is the
4293  * hw/ucode revision (which we know anyway).  Since it's here,
4294  * I'll just leave it in, just in case e.g. the Intel guys want to
4295  * help us decipher some "ADVANCED_SYSASSERT" later.
4296  */
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 *
 * Reads the iwm_error_event_table the firmware advertised in its ALIVE
 * response (sc_uc.uc_error_event_table) out of device SRAM and prints
 * every field.  The pointer is sanity-checked against the SRAM window
 * before use.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        base = sc->sc_uc.uc_error_event_table;
        /* table must live inside the device SRAM window */
        if (base < 0x800000 || base >= 0x80C000) {
                device_printf(sc->sc_dev,
                    "Not valid error log pointer 0x%08x\n", base);
                return;
        }

        /* iwm_read_mem() takes a count of 32-bit words, not bytes */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        /* table.valid doubles as the number of logged errors here */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
        device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
        device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4362 #endif
4363
/*
 * Sync the RX buffer's DMA map for CPU reads, then point _var_ / _ptr_
 * at the payload that immediately follows the iwm_rx_packet header.
 *
 * NOTE: both macros expand the locals `ring' and `data' from the
 * enclosing scope -- they are only usable inside iwm_notif_intr()'s
 * RX loop.  _len_ in SYNC_RESP_PTR is accepted but unused.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
do {                                                                    \
        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
        _var_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
do {                                                                    \
        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
        _ptr_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)
4375
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * The trailing semicolon was removed from the expansion (call sites
 * already supply their own), avoiding the classic empty-statement
 * hazard in unbraced if/else bodies; the argument is parenthesized.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
4377
4378 /*
4379  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4380  * Basic structure from if_iwn
4381  */
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 *
 * Walks the RX ring from our read index (sc->rxq.cur) up to the
 * firmware's write index (closed_rb_num in the ring status page),
 * dispatching each packet on pkt->hdr.code: RX frames and TX
 * completions go to their handlers, firmware notifications update softc
 * state and wake sleepers, and synchronous command responses are copied
 * into sc_cmd_resp for the waiting issuer.  Finally the firmware is told
 * how far we got via the channel-0 write pointer.  Called with the IWM
 * lock held (from iwm_intr()).
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
        uint16_t hw;

        /* pull the status page (closed_rb_num) out of DMA memory */
        bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
            BUS_DMASYNC_POSTREAD);

        hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

        /*
         * Process responses
         */
        while (sc->rxq.cur != hw) {
                struct iwm_rx_ring *ring = &sc->rxq;
                struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
                struct iwm_rx_packet *pkt;
                struct iwm_cmd_response *cresp;
                int qid, idx;

                bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                    BUS_DMASYNC_POSTREAD);
                pkt = mtod(data->m, struct iwm_rx_packet *);

                /* bit 7 marks firmware-originated packets; see below */
                qid = pkt->hdr.qid & ~0x80;
                idx = pkt->hdr.idx;

                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
                    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
                    pkt->hdr.code, sc->rxq.cur, hw);

                /*
                 * randomly get these from the firmware, no idea why.
                 * they at least seem harmless, so just ignore them for now
                 */
                if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
                    || pkt->len_n_flags == htole32(0x55550000))) {
                        ADVANCE_RXQ(sc);
                        continue;
                }

                switch (pkt->hdr.code) {
                case IWM_REPLY_RX_PHY_CMD:
                        iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
                        break;

                case IWM_REPLY_RX_MPDU_CMD:
                        iwm_mvm_rx_rx_mpdu(sc, pkt, data);
                        break;

                case IWM_TX_CMD:
                        iwm_mvm_rx_tx_cmd(sc, pkt, data);
                        break;

                case IWM_MISSED_BEACONS_NOTIFICATION: {
                        struct iwm_missed_beacons_notif *resp;
                        int missed;

                        /* XXX look at mac_id to determine interface ID */
                        struct ieee80211com *ic = sc->sc_ic;
                        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

                        SYNC_RESP_STRUCT(resp, pkt);
                        missed = le32toh(resp->consec_missed_beacons);

                        IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
                            "%s: MISSED_BEACON: mac_id=%d, "
                            "consec_since_last_rx=%d, consec=%d, num_expect=%d "
                            "num_rx=%d\n",
                            __func__,
                            le32toh(resp->mac_id),
                            le32toh(resp->consec_missed_beacons_since_last_rx),
                            le32toh(resp->consec_missed_beacons),
                            le32toh(resp->num_expected_beacons),
                            le32toh(resp->num_recvd_beacons));

                        /* Be paranoid */
                        if (vap == NULL)
                                break;

                        /* XXX no net80211 locking? */
                        if (vap->iv_state == IEEE80211_S_RUN &&
                            (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
                                if (missed > vap->iv_bmissthreshold) {
                                        /* XXX bad locking; turn into task */
                                        IWM_UNLOCK(sc);
                                        ieee80211_beacon_miss(ic);
                                        IWM_LOCK(sc);
                                }
                        }

                        break; }

                case IWM_MVM_ALIVE: {
                        /* firmware is up: record its table pointers, wake waiter */
                        struct iwm_mvm_alive_resp *resp;
                        SYNC_RESP_STRUCT(resp, pkt);

                        sc->sc_uc.uc_error_event_table
                            = le32toh(resp->error_event_table_ptr);
                        sc->sc_uc.uc_log_event_table
                            = le32toh(resp->log_event_table_ptr);
                        sc->sched_base = le32toh(resp->scd_base_ptr);
                        sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

                        sc->sc_uc.uc_intr = 1;
                        wakeup(&sc->sc_uc);
                        break; }

                case IWM_CALIB_RES_NOTIF_PHY_DB: {
                        struct iwm_calib_res_notif_phy_db *phy_db_notif;
                        SYNC_RESP_STRUCT(phy_db_notif, pkt);

                        iwm_phy_db_set_section(sc, phy_db_notif);

                        break; }

                case IWM_STATISTICS_NOTIFICATION: {
                        struct iwm_notif_statistics *stats;
                        SYNC_RESP_STRUCT(stats, pkt);
                        memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
                        sc->sc_noise = iwm_get_noise(&stats->rx.general);
                        break; }

                case IWM_NVM_ACCESS_CMD:
                        /* copy the response to the thread blocked in iwm_send_cmd */
                        if (sc->sc_wantresp == ((qid << 16) | idx)) {
                                bus_dmamap_sync(sc->rxq.data_dmat, data->map,
                                    BUS_DMASYNC_POSTREAD);
                                memcpy(sc->sc_cmd_resp,
                                    pkt, sizeof(sc->sc_cmd_resp));
                        }
                        break;

                case IWM_PHY_CONFIGURATION_CMD:
                case IWM_TX_ANT_CONFIGURATION_CMD:
                case IWM_ADD_STA:
                case IWM_MAC_CONTEXT_CMD:
                case IWM_REPLY_SF_CFG_CMD:
                case IWM_POWER_TABLE_CMD:
                case IWM_PHY_CONTEXT_CMD:
                case IWM_BINDING_CONTEXT_CMD:
                case IWM_TIME_EVENT_CMD:
                case IWM_SCAN_REQUEST_CMD:
                case IWM_REPLY_BEACON_FILTERING_CMD:
                case IWM_MAC_PM_POWER_TABLE:
                case IWM_TIME_QUOTA_CMD:
                case IWM_REMOVE_STA:
                case IWM_TXPATH_FLUSH:
                case IWM_LQ_CMD:
                        /* generic command responses: header + iwm_cmd_response */
                        SYNC_RESP_STRUCT(cresp, pkt);
                        if (sc->sc_wantresp == ((qid << 16) | idx)) {
                                memcpy(sc->sc_cmd_resp,
                                    pkt, sizeof(*pkt)+sizeof(*cresp));
                        }
                        break;

                /* ignore */
                case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
                        break;

                case IWM_INIT_COMPLETE_NOTIF:
                        sc->sc_init_complete = 1;
                        wakeup(&sc->sc_init_complete);
                        break;

                case IWM_SCAN_COMPLETE_NOTIFICATION: {
                        /* defer end-of-scan processing to the taskqueue */
                        struct iwm_scan_complete_notif *notif;
                        SYNC_RESP_STRUCT(notif, pkt);
                        taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
                        break; }

                case IWM_REPLY_ERROR: {
                        struct iwm_error_resp *resp;
                        SYNC_RESP_STRUCT(resp, pkt);

                        device_printf(sc->sc_dev,
                            "firmware error 0x%x, cmd 0x%x\n",
                            le32toh(resp->error_type),
                            resp->cmd_id);
                        break; }

                case IWM_TIME_EVENT_NOTIFICATION: {
                        struct iwm_time_event_notif *notif;
                        SYNC_RESP_STRUCT(notif, pkt);

                        /* track auth-protection time-event state for sleepers */
                        if (notif->status) {
                                if (le32toh(notif->action) &
                                    IWM_TE_V2_NOTIF_HOST_EVENT_START)
                                        sc->sc_auth_prot = 2;
                                else
                                        sc->sc_auth_prot = 0;
                        } else {
                                sc->sc_auth_prot = -1;
                        }
                        IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                            "%s: time event notification auth_prot=%d\n",
                                __func__, sc->sc_auth_prot);

                        wakeup(&sc->sc_auth_prot);
                        break; }

                case IWM_MCAST_FILTER_CMD:
                        break;

                default:
                        device_printf(sc->sc_dev,
                            "cmd %04x frame %d/%d %x UNHANDLED (this should "
                            "not happen)\n",
                            pkt->hdr.code, qid, idx,
                            pkt->len_n_flags);
                        panic("unhandled command");
                        break;
                }

                /*
                 * Why test bit 0x80?  The Linux driver:
                 *
                 * There is one exception:  uCode sets bit 15 when it
                 * originates the response/notification, i.e. when the
                 * response/notification is not a direct response to a
                 * command sent by the driver.  For example, uCode issues
                 * IWM_REPLY_RX when it sends a received frame to the driver;
                 * it is not a direct response to any driver command.
                 *
                 * Ok, so since when is 7 == 15?  Well, the Linux driver
                 * uses a slightly different format for pkt->hdr, and "qid"
                 * is actually the upper byte of a two-byte field.
                 */
                if (!(pkt->hdr.qid & (1 << 7))) {
                        iwm_cmd_done(sc, pkt);
                }

                ADVANCE_RXQ(sc);
        }

        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /*
         * Tell the firmware what we have processed.
         * Seems like the hardware gets upset unless we align
         * the write by 8??
         */
        hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4628
/*
 * Main interrupt handler.
 *
 * Gathers the interrupt cause either from the ICT (interrupt cause
 * table) DMA area or directly from IWM_CSR_INT / FH_INT_STATUS, acks
 * it, and services each cause: fatal SW/HW errors stop the device,
 * FH_TX wakes the firmware-load waiter, RF_KILL downs the interface,
 * and RX (or the cargo-culted periodic interrupt) runs
 * iwm_notif_intr().  Interrupts are re-enabled on the way out except
 * after a fatal error ("out" label skips iwm_restore_interrupts()).
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        struct ifnet *ifp = sc->sc_ifp;
        int handled = 0;
        int r1, r2, rv = 0;
        int isperiodic = 0;

#if defined(__DragonFly__)
        /* guard against a stray interrupt after detach */
        if (sc->sc_mem == NULL) {
                kprintf("iwm_intr: detached\n");
                return;
        }
#endif
        IWM_LOCK(sc);
        /* mask all interrupts while we figure out what happened */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                /*
                 * NOTE(review): ICT entries are device (little) endian;
                 * htole32() on a read looks backwards -- le32toh() is
                 * presumably intended.  Identical on LE hosts; verify
                 * before running on big-endian.
                 */
                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        ict[sc->ict_cur] = 0;   /* ack the ICT slot */
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;   /* spurious; just re-enable and leave */
        }

        /* ack the causes we are about to service */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* ignored */
        handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
                int i;
                struct ieee80211com *ic = sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

                iwm_nic_error(sc);

                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", vap->iv_state);
#endif

                device_printf(sc->sc_dev, "fatal firmware error\n");
                ifp->if_flags &= ~IFF_UP;
                iwm_stop_locked(ifp);
                rv = 1;
                goto out;

        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                ifp->if_flags &= ~IFF_UP;
                iwm_stop_locked(ifp);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        ifp->if_flags &= ~IFF_UP;
                        iwm_stop_locked(ifp);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
4785
4786 /*
4787  * Autoconf glue-sniffing
4788  */
4789 #define PCI_VENDOR_INTEL                0x8086
4790 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
4791 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
4792 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
4793 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
4794 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
4795 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
4796
/* PCI device-ID -> marketing-name table; scanned by iwm_probe(). */
static const struct iwm_devices {
        uint16_t        device;
        const char      *name;
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
        { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
        { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
        { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
4808
4809 static int
4810 iwm_probe(device_t dev)
4811 {
4812         int i;
4813
4814         for (i = 0; i < nitems(iwm_devices); i++) {
4815                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4816                     pci_get_device(dev) == iwm_devices[i].device) {
4817                         device_set_desc(dev, iwm_devices[i].name);
4818                         return (BUS_PROBE_DEFAULT);
4819                 }
4820         }
4821
4822         return (ENXIO);
4823 }
4824
4825 static int
4826 iwm_dev_check(device_t dev)
4827 {
4828         struct iwm_softc *sc;
4829
4830         sc = device_get_softc(dev);
4831
4832         switch (pci_get_device(dev)) {
4833         case PCI_PRODUCT_INTEL_WL_3160_1:
4834         case PCI_PRODUCT_INTEL_WL_3160_2:
4835                 sc->sc_fwname = "iwm3160fw";
4836                 sc->host_interrupt_operation_mode = 1;
4837                 return (0);
4838         case PCI_PRODUCT_INTEL_WL_7260_1:
4839         case PCI_PRODUCT_INTEL_WL_7260_2:
4840                 sc->sc_fwname = "iwm7260fw";
4841                 sc->host_interrupt_operation_mode = 1;
4842                 return (0);
4843         case PCI_PRODUCT_INTEL_WL_7265_1:
4844         case PCI_PRODUCT_INTEL_WL_7265_2:
4845                 sc->sc_fwname = "iwm7265fw";
4846                 sc->host_interrupt_operation_mode = 0;
4847                 return (0);
4848         default:
4849                 device_printf(dev, "unknown adapter type\n");
4850                 return ENXIO;
4851         }
4852 }
4853
/*
 * Acquire PCI-level resources: clear the retry-timeout register,
 * enable bus mastering, map BAR0, allocate the (MSI if possible)
 * interrupt, install the interrupt handler, and fetch the parent
 * DMA tag.  Returns 0 on success or ENXIO on failure; on failure the
 * caller (iwm_attach) tears everything down via iwm_detach_local().
 */
static int
iwm_pci_attach(device_t dev)
{
        struct iwm_softc *sc;
        int count, error, rid;
        uint16_t reg;
#if defined(__DragonFly__)
        int irq_flags;
#endif

        sc = device_get_softc(dev);

        /* Clear device-specific "PCI retry timeout" register (41h). */
        reg = pci_read_config(dev, 0x40, sizeof(reg));
        pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));

        /* Enable bus-mastering and hardware bug workaround. */
        pci_enable_busmaster(dev);
        reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
        /* if !MSI: clear a latched INTx state bit before enabling MSI */
        if (reg & PCIM_STATUS_INTxSTATE) {
                reg &= ~PCIM_STATUS_INTxSTATE;
        }
        pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

        /* Map the device registers (BAR0). */
        rid = PCIR_BAR(0);
        sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (sc->sc_mem == NULL) {
                device_printf(sc->sc_dev, "can't map mem space\n");
                return (ENXIO);
        }
        sc->sc_st = rman_get_bustag(sc->sc_mem);
        sc->sc_sh = rman_get_bushandle(sc->sc_mem);

        /* Install interrupt handler. */
        count = 1;
        rid = 0;
#if defined(__DragonFly__)
        pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
        /* Prefer a single MSI vector (rid 1); fall back to shared INTx. */
        if (pci_alloc_msi(dev, &count) == 0)
                rid = 1;
        sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
            (rid != 0 ? 0 : RF_SHAREABLE));
#endif
        if (sc->sc_irq == NULL) {
                device_printf(dev, "can't map interrupt\n");
                        return (ENXIO);
        }
#if defined(__DragonFly__)
        error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
                               iwm_intr, sc, &sc->sc_ih,
                               &wlan_global_serializer);
#else
        error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, iwm_intr, sc, &sc->sc_ih);
#endif
        /*
         * NOTE(review): the bus_setup_intr() return value in 'error' is
         * never checked -- only sc_ih is tested.  Confirm sc_ih is always
         * NULL whenever bus_setup_intr() fails on both platforms.
         */
        if (sc->sc_ih == NULL) {
                device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
                pci_release_msi(dev);
#endif
                        return (ENXIO);
        }
        sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

        return (0);
}
4924
/*
 * Release the PCI resources acquired in iwm_pci_attach(): interrupt
 * handler, IRQ resource (and MSI), and the BAR0 memory mapping.
 * Safe to call with partially-initialized state: each resource is
 * NULL-checked, and on DragonFly the pointers are cleared so a
 * second call is harmless.
 */
static void
iwm_pci_detach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);

        if (sc->sc_irq != NULL) {
                bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
                bus_release_resource(dev, SYS_RES_IRQ,
                    rman_get_rid(sc->sc_irq), sc->sc_irq);
                pci_release_msi(dev);
#if defined(__DragonFly__)
                sc->sc_irq = NULL;
#endif
        }
        if (sc->sc_mem != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
                sc->sc_mem = NULL;
#endif
        }
}
4947
4948
4949
/*
 * Device attach: initialize locks and callouts, start the task queue,
 * acquire PCI resources, select per-device firmware, allocate all DMA
 * memory (firmware buffer, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings), then create the ifnet and pre-populate net80211 state.
 * Firmware load and ieee80211_ifattach() are deferred to iwm_preinit()
 * via config_intrhook_establish().  On any failure, jumps to 'fail'
 * which tears down everything allocated so far and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc;
        struct ieee80211com *ic;
        struct ifnet *ifp;
        int error;
        int txq_i, i;

        sc = device_get_softc(dev);
        sc->sc_dev = dev;
#if defined(__DragonFly__)
        /* DragonFly uses a lockmgr lock; FreeBSD uses a mutex. */
        lockinit(&sc->sc_lk, "iwm_lk", 0, 0);
        callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
        mtx_init(&sc->sc_mtx, "iwm_mtx", MTX_DEF, 0);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
        callout_init(&sc->sc_led_blink_to);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
        /* Single-threaded task queue for end-of-scan processing. */
        sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
        error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
                                        -1, "iwm_taskq");
#else
        error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
        if (error != 0) {
                device_printf(dev, "can't start threads, error %d\n",
                    error);
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response is outstanding yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

        /*
         * We now start fiddling with the hardware
         */
        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        sc->sc_ifp = ifp = if_alloc(IFT_IEEE80211);
        if (ifp == NULL) {
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, "iwm", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = iwm_init;
        ifp->if_ioctl = iwm_ioctl;
        ifp->if_start = iwm_start;
#if defined(__DragonFly__)
        ifp->if_nmbjclusters = IWM_RX_RING_COUNT;
        ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
#else
        IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_READY(&ifp->if_snd);
#endif

        /*
         * Set it here so we can initialise net80211.
         * But, if we fail before we call net80211_ifattach(),
         * we can't just call iwm_detach() or it'll free
         * net80211 without it having been setup.
         */
        sc->sc_ic = ic = ifp->if_l2com;
        ic->ic_ifp = ifp;
#if defined(__DragonFly__)
#else
        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
#endif
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Give each PHY context a unique id; no channel bound yet. */
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
        /* Defer firmware load / net80211 attach to iwm_preinit(). */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        sc->sc_preinit_hook.ich_desc = "iwm";
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
5129
5130 static int
5131 iwm_update_edca(struct ieee80211com *ic)
5132 {
5133         struct iwm_softc *sc = ic->ic_ifp->if_softc;
5134
5135         device_printf(sc->sc_dev, "%s: called\n", __func__);
5136         return (0);
5137 }
5138
/*
 * Deferred attach work, run once from the config_intrhook established
 * in iwm_attach(): start the hardware, run the "init" firmware once
 * (after which sc_nvm and sc_fwver are populated), print the firmware
 * version, attach net80211 and install the driver's net80211 method
 * pointers.  On failure the whole device is torn down via
 * iwm_detach_local(); net80211 has not been attached at that point,
 * hence do_net80211 = 0.
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* Run the init firmware once, then power the device back down. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "revision 0x%x, firmware %d.%d (API ver. %d)\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            IWM_UCODE_MAJOR(sc->sc_fwver),
            IWM_UCODE_MINOR(sc->sc_fwver),
            IWM_UCODE_API(sc->sc_fwver));

        /* not all hardware can do 5GHz band */
        if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        wlan_serialize_enter();
        ieee80211_ifattach(ic, sc->sc_bssid);
        wlan_serialize_exit();
        /* Install driver methods after ieee80211_ifattach() set defaults. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_update_edca;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
5207
5208 /*
5209  * Attach the interface to 802.11 radiotap.
5210  */
5211 static void
5212 iwm_radiotap_attach(struct iwm_softc *sc)
5213 {
5214         struct ifnet *ifp = sc->sc_ifp;
5215         struct ieee80211com *ic = ifp->if_l2com;
5216
5217         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5218             "->%s begin\n", __func__);
5219         ieee80211_radiotap_attach(ic,
5220             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5221                 IWM_TX_RADIOTAP_PRESENT,
5222             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5223                 IWM_RX_RADIOTAP_PRESENT);
5224         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5225             "->%s end\n", __func__);
5226 }
5227
5228 static struct ieee80211vap *
5229 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5230     enum ieee80211_opmode opmode, int flags,
5231     const uint8_t bssid[IEEE80211_ADDR_LEN],
5232     const uint8_t mac[IEEE80211_ADDR_LEN])
5233 {
5234         struct iwm_vap *ivp;
5235         struct ieee80211vap *vap;
5236         uint8_t mac1[IEEE80211_ADDR_LEN];
5237
5238         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5239                 return NULL;
5240         IEEE80211_ADDR_COPY(mac1, mac);
5241         ivp = (struct iwm_vap *) kmalloc(sizeof(struct iwm_vap),
5242                                         M_80211_VAP, M_INTWAIT | M_ZERO);
5243         vap = &ivp->iv_vap;
5244         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
5245         IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
5246         vap->iv_bmissthreshold = 10;            /* override default */
5247         /* Override with driver methods. */
5248         ivp->iv_newstate = vap->iv_newstate;
5249         vap->iv_newstate = iwm_newstate;
5250
5251         ieee80211_ratectl_init(vap);
5252         /* Complete setup. */
5253         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status);
5254         ic->ic_opmode = opmode;
5255
5256         return vap;
5257 }
5258
5259 static void
5260 iwm_vap_delete(struct ieee80211vap *vap)
5261 {
5262         struct iwm_vap *ivp = IWM_VAP(vap);
5263
5264         ieee80211_ratectl_deinit(vap);
5265         ieee80211_vap_detach(vap);
5266         kfree(ivp, M_80211_VAP);
5267 }
5268
5269 static void
5270 iwm_scan_start(struct ieee80211com *ic)
5271 {
5272         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5273         struct iwm_softc *sc = ic->ic_ifp->if_softc;
5274         int error;
5275
5276         if (sc->sc_scanband)
5277                 return;
5278         IWM_LOCK(sc);
5279         error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5280         if (error) {
5281                 device_printf(sc->sc_dev, "could not initiate scan\n");
5282                 IWM_UNLOCK(sc);
5283                 wlan_serialize_enter();
5284                 ieee80211_cancel_scan(vap);
5285                 wlan_serialize_exit();
5286         } else {
5287                 iwm_led_blink_start(sc);
5288                 IWM_UNLOCK(sc);
5289         }
5290 }
5291
5292 static void
5293 iwm_scan_end(struct ieee80211com *ic)
5294 {
5295         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5296         struct iwm_softc *sc = ic->ic_ifp->if_softc;
5297
5298         IWM_LOCK(sc);
5299         iwm_led_blink_stop(sc);
5300         if (vap->iv_state == IEEE80211_S_RUN)
5301                 iwm_mvm_led_enable(sc);
5302         IWM_UNLOCK(sc);
5303 }
5304
/*
 * net80211 multicast-filter update hook.  Stub: multicast filter
 * programming is not implemented for this hardware/driver.
 */
static void
iwm_update_mcast(struct ifnet *ifp)
{
}
5309
/*
 * net80211 set-channel hook.  Stub: channel changes are handled
 * elsewhere in the driver, not through this callback.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
5314
/*
 * net80211 per-channel scan hook.  Stub: the firmware performs the
 * full scan itself (see iwm_scan_start), so per-channel stepping by
 * net80211 is not used.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
5319
/*
 * net80211 minimum-dwell hook.  Stub: dwell handling is left to the
 * firmware's scan engine.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
        return;
}
5325
5326 void
5327 iwm_init_task(void *arg1)
5328 {
5329         struct iwm_softc *sc = arg1;
5330         struct ifnet *ifp = sc->sc_ifp;
5331
5332         IWM_LOCK(sc);
5333         while (sc->sc_flags & IWM_FLAG_BUSY) {
5334 #if defined(__DragonFly__)
5335                 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5336 #else
5337                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5338 #endif
5339 }
5340         sc->sc_flags |= IWM_FLAG_BUSY;
5341         iwm_stop_locked(ifp);
5342 #if defined(__DragonFly__)
5343         if ((ifp->if_flags & IFF_UP) &&
5344             (ifp->if_flags & IFF_RUNNING))
5345 #else
5346         if ((ifp->if_flags & IFF_UP) &&
5347             (ifp->if_drv_flags & IFF_DRV_RUNNING))
5348 #endif
5349                 iwm_init(sc);
5350         sc->sc_flags &= ~IWM_FLAG_BUSY;
5351         wakeup(&sc->sc_flags);
5352         IWM_UNLOCK(sc);
5353 }
5354
5355 static int
5356 iwm_resume(device_t dev)
5357 {
5358         uint16_t reg;
5359
5360         /* Clear device-specific "PCI retry timeout" register (41h). */
5361         reg = pci_read_config(dev, 0x40, sizeof(reg));
5362         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5363         iwm_init_task(device_get_softc(dev));
5364
5365         return 0;
5366 }
5367
/*
 * Power-management suspend hook: stop the interface if it is running.
 * State is rebuilt from scratch in iwm_resume() via iwm_init_task().
 */
static int
iwm_suspend(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = sc->sc_ifp;

#if defined(__DragonFly__)
        if (ifp->if_flags & IFF_RUNNING)
#else
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#endif
                iwm_stop(ifp, 0);

        return (0);
}
5383
/*
 * Common teardown used by both iwm_detach() and failed attach paths.
 * Frees, in order: task queue, watchdog callout + ifnet (+ net80211
 * state), LED callout, TX rings, firmware image, scheduler, ICT
 * table, keep-warm page, firmware DMA memory, and finally the PCI
 * resources and the softc lock.  Each step is guarded so partially-
 * initialized state is handled safely.
 *
 * do_net80211: non-zero only when ieee80211_ifattach() has completed
 * (i.e. from iwm_detach()); attach-failure paths pass 0 so net80211
 * state that was never set up is not torn down.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ieee80211com *ic;
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (sc->sc_tq) {
#if defined(__DragonFly__)
                /* doesn't exist for DFly, DFly drains tasks on free */
#else
                taskqueue_drain_all(sc->sc_tq);
#endif
                taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
                sc->sc_tq = NULL;
#endif
        }
        if (ifp) {
                callout_drain(&sc->sc_watchdog_to);
                ic = sc->sc_ic;
                iwm_stop_device(sc);
                if (ic && do_net80211) {
                        wlan_serialize_enter();
                        ieee80211_ifdetach(ic);
                        wlan_serialize_exit();
                }
                if_free(ifp);
#if defined(__DragonFly__)
                sc->sc_ifp = NULL;
#endif
        }
        callout_drain(&sc->sc_led_blink_to);

        /* Free descriptor rings */
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* free scheduler */
        iwm_free_sched(sc);
        if (sc->ict_dma.vaddr != NULL)
                iwm_free_ict(sc);
        if (sc->kw_dma.vaddr != NULL)
                iwm_free_kw(sc);
        if (sc->fw_dma.vaddr != NULL)
                iwm_free_fwmem(sc);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        /*
         * NOTE(review): sc_lk is the DragonFly lock, yet this call is
         * not wrapped in #if defined(__DragonFly__) like other sc_lk
         * uses in this file -- confirm this file only builds on DFly.
         */
        lockuninit(&sc->sc_lk);

        return (0);
}
5444
5445 static int
5446 iwm_detach(device_t dev)
5447 {
5448         struct iwm_softc *sc = device_get_softc(dev);
5449         int error;
5450
5451         error = iwm_detach_local(sc, 1);
5452
5453         return error;
5454 }
5455
/* newbus device interface: standard probe/attach/detach and PM hooks. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};
5466
/* Driver declaration binding the method table to the softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);