80211 - Further ifp->if_softc -> ic_softc conversions in wlan drivers.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      wlan_serialize_enter()/exit() hacks (will be removable when we
123  *                                           do the device netif removal).
124  *      header file paths (DFly allows localized path specifications).
125  *      minor header file differences.
126  *
127  * Comprehensive list of adjustments for DragonFly #ifdef'd:
128  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
129  *      packet counters
130  *      RUNNING and OACTIVE tests
131  *      msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
132  *      mtx -> lk  (mtx functions -> lockmgr functions)
133  *      callout differences
134  *      taskqueue differences
135  *      iwm_start() and ifq differences
136  *      iwm_ioctl() differences
137  *      MSI differences
138  *      bus_setup_intr() differences
139  *      minor PCI config register naming differences
140  */
141 #include <sys/cdefs.h>
142 __FBSDID("$FreeBSD$");
143
144 #include <sys/param.h>
145 #include <sys/bus.h>
146 #include <sys/endian.h>
147 #include <sys/firmware.h>
148 #include <sys/kernel.h>
149 #include <sys/malloc.h>
150 #include <sys/mbuf.h>
151 #include <sys/mutex.h>
152 #include <sys/module.h>
153 #include <sys/proc.h>
154 #include <sys/rman.h>
155 #include <sys/socket.h>
156 #include <sys/sockio.h>
157 #include <sys/sysctl.h>
158 #include <sys/linker.h>
159
160 #include <machine/endian.h>
161
162 #include <bus/pci/pcivar.h>
163 #include <bus/pci/pcireg.h>
164
165 #include <net/bpf.h>
166
167 #include <net/if.h>
168 #include <net/if_var.h>
169 #include <net/if_arp.h>
170 #include <net/ethernet.h>
171 #include <net/if_dl.h>
172 #include <net/if_media.h>
173 #include <net/if_types.h>
174 #include <net/ifq_var.h>
175
176 #include <netinet/in.h>
177 #include <netinet/in_systm.h>
178 #include <netinet/if_ether.h>
179 #include <netinet/ip.h>
180
181 #include <netproto/802_11/ieee80211_var.h>
182 #include <netproto/802_11/ieee80211_regdomain.h>
183 #include <netproto/802_11/ieee80211_ratectl.h>
184 #include <netproto/802_11/ieee80211_radiotap.h>
185
186 #include "if_iwmreg.h"
187 #include "if_iwmvar.h"
188 #include "if_iwm_debug.h"
189 #include "if_iwm_util.h"
190 #include "if_iwm_binding.h"
191 #include "if_iwm_phy_db.h"
192 #include "if_iwm_mac_ctxt.h"
193 #include "if_iwm_phy_ctxt.h"
194 #include "if_iwm_time_event.h"
195 #include "if_iwm_power.h"
196 #include "if_iwm_scan.h"
197 #include "if_iwm_pcie_trans.h"
198 #include "if_iwm_led.h"
199
/*
 * Fixed channel list used when parsing NVM data: 2.4GHz channels 1-14
 * first, followed by the 5GHz channel set.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of 2.4GHz entries at the head of iwm_nvm_channels */
#define IWM_NUM_2GHZ_CHANNELS   14
209
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * Maps 802.11 rates (in units of 500kb/s) to the PLCP signal values
 * the firmware expects.  CCK rates come first, OFDM rates after.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500kb/s units (2 == 1Mb/s) */
	uint8_t plcp;	/* corresponding PLCP signal field value */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* Index of the first CCK entry and the first OFDM entry in iwm_rates */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
236
237 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
238 static int      iwm_firmware_store_section(struct iwm_softc *,
239                                            enum iwm_ucode_type,
240                                            const uint8_t *, size_t);
241 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
242 static void     iwm_fw_info_free(struct iwm_fw_info *);
243 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
244 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
245 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
246                                      bus_size_t, bus_size_t);
247 static void     iwm_dma_contig_free(struct iwm_dma_info *);
248 static int      iwm_alloc_fwmem(struct iwm_softc *);
249 static void     iwm_free_fwmem(struct iwm_softc *);
250 static int      iwm_alloc_sched(struct iwm_softc *);
251 static void     iwm_free_sched(struct iwm_softc *);
252 static int      iwm_alloc_kw(struct iwm_softc *);
253 static void     iwm_free_kw(struct iwm_softc *);
254 static int      iwm_alloc_ict(struct iwm_softc *);
255 static void     iwm_free_ict(struct iwm_softc *);
256 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
257 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
258 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
259 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
260                                   int);
261 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
262 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
263 static void     iwm_enable_interrupts(struct iwm_softc *);
264 static void     iwm_restore_interrupts(struct iwm_softc *);
265 static void     iwm_disable_interrupts(struct iwm_softc *);
266 static void     iwm_ict_reset(struct iwm_softc *);
267 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
268 static void     iwm_stop_device(struct iwm_softc *);
269 static void     iwm_mvm_nic_config(struct iwm_softc *);
270 static int      iwm_nic_rx_init(struct iwm_softc *);
271 static int      iwm_nic_tx_init(struct iwm_softc *);
272 static int      iwm_nic_init(struct iwm_softc *);
273 static void     iwm_enable_txq(struct iwm_softc *, int, int);
274 static int      iwm_post_alive(struct iwm_softc *);
275 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
276                                    uint16_t, uint8_t *, uint16_t *);
277 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
278                                      uint16_t *);
279 static void     iwm_init_channel_map(struct iwm_softc *,
280                                      const uint16_t * const);
281 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282                                    const uint16_t *, const uint16_t *, uint8_t,
283                                    uint8_t);
284 struct iwm_nvm_section;
285 static int      iwm_parse_nvm_sections(struct iwm_softc *,
286                                        struct iwm_nvm_section *);
287 static int      iwm_nvm_init(struct iwm_softc *);
288 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
289                                         const uint8_t *, uint32_t);
290 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
291 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
292 static int      iwm_fw_alive(struct iwm_softc *, uint32_t);
293 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
294 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
295 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
296                                               enum iwm_ucode_type);
297 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
298 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
299 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
300 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
301                                             struct iwm_rx_phy_info *);
302 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
303                                       struct iwm_rx_packet *,
304                                       struct iwm_rx_data *);
305 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
306 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
307                                    struct iwm_rx_data *);
308 static void     iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
309                                          struct iwm_rx_packet *,
310                                          struct iwm_node *);
311 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
312                                   struct iwm_rx_data *);
313 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 #if 0
315 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
316                                  uint16_t);
317 #endif
318 static const struct iwm_rate *
319         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
320                         struct ieee80211_frame *, struct iwm_tx_cmd *);
321 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
322                        struct ieee80211_node *, int);
323 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
324                              const struct ieee80211_bpf_params *);
325 static void     iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
326                                              struct iwm_mvm_add_sta_cmd_v5 *);
327 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
328                                                 struct iwm_mvm_add_sta_cmd_v6 *,
329                                                 int *);
330 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331                                        int);
332 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
333 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
334 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
335                                            struct iwm_int_sta *,
336                                            const uint8_t *, uint16_t, uint16_t);
337 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
338 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
339 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
340 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
341 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
342 static struct ieee80211_node *
343                 iwm_node_alloc(struct ieee80211vap *,
344                                const uint8_t[IEEE80211_ADDR_LEN]);
345 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
346 static int      iwm_media_change(struct ifnet *);
347 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
348 static void     iwm_endscan_cb(void *, int);
349 static int      iwm_init_hw(struct iwm_softc *);
350 static void     iwm_init(void *);
351 static void     iwm_init_locked(struct iwm_softc *);
352 #if defined(__DragonFly__)
353 static void     iwm_start(struct ifnet *,  struct ifaltq_subque *);
354 #else
355 static void     iwm_start(struct ifnet *);
356 #endif
357 static void     iwm_start_locked(struct ifnet *);
358 static void     iwm_stop(struct ifnet *, int);
359 static void     iwm_stop_locked(struct ifnet *);
360 static void     iwm_watchdog(void *);
361 #if defined(__DragonFly__)
362 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t, struct ucred *cred);
363 #else
364 static int      iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
365 #endif
366 #ifdef IWM_DEBUG
367 static const char *
368                 iwm_desc_lookup(uint32_t);
369 static void     iwm_nic_error(struct iwm_softc *);
370 #endif
371 static void     iwm_notif_intr(struct iwm_softc *);
372 static void     iwm_intr(void *);
373 static int      iwm_attach(device_t);
374 static void     iwm_preinit(void *);
375 static int      iwm_detach_local(struct iwm_softc *sc, int);
376 static void     iwm_init_task(void *);
377 static void     iwm_radiotap_attach(struct iwm_softc *);
378 static struct ieee80211vap *
379                 iwm_vap_create(struct ieee80211com *,
380                                const char [IFNAMSIZ], int,
381                                enum ieee80211_opmode, int,
382                                const uint8_t [IEEE80211_ADDR_LEN],
383                                const uint8_t [IEEE80211_ADDR_LEN]);
384 static void     iwm_vap_delete(struct ieee80211vap *);
385 static void     iwm_scan_start(struct ieee80211com *);
386 static void     iwm_scan_end(struct ieee80211com *);
387 static void     iwm_update_mcast(struct ieee80211com *);
388 static void     iwm_set_channel(struct ieee80211com *);
389 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
390 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
391 static int      iwm_detach(device_t);
392
393 #if defined(__DragonFly__)
394 static int      iwm_msi_enable = 1;
395
396 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
397
398 /*
399  * This is a hack due to the wlan_serializer deadlocking sleepers.
400  */
401 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);
402
403 int
404 iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
405 {
406         int error;
407
408         if (wlan_is_serialized()) {
409                 wlan_serialize_exit();
410                 error = lksleep(chan, lk, flags, wmesg, to);
411                 lockmgr(lk, LK_RELEASE);
412                 wlan_serialize_enter();
413                 lockmgr(lk, LK_EXCLUSIVE);
414         } else {
415                 error = lksleep(chan, lk, flags, wmesg, to);
416         }
417         return error;
418 }
419
420 #endif
421
422 /*
423  * Firmware parser.
424  */
425
426 static int
427 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
428 {
429         const struct iwm_fw_cscheme_list *l = (const void *)data;
430
431         if (dlen < sizeof(*l) ||
432             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
433                 return EINVAL;
434
435         /* we don't actually store anything for now, always use s/w crypto */
436
437         return 0;
438 }
439
440 static int
441 iwm_firmware_store_section(struct iwm_softc *sc,
442     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
443 {
444         struct iwm_fw_sects *fws;
445         struct iwm_fw_onesect *fwone;
446
447         if (type >= IWM_UCODE_TYPE_MAX)
448                 return EINVAL;
449         if (dlen < sizeof(uint32_t))
450                 return EINVAL;
451
452         fws = &sc->sc_fw.fw_sects[type];
453         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
454                 return EINVAL;
455
456         fwone = &fws->fw_sect[fws->fw_count];
457
458         /* first 32bit are device load offset */
459         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
460
461         /* rest is data */
462         fwone->fws_data = data + sizeof(uint32_t);
463         fwone->fws_len = dlen - sizeof(uint32_t);
464
465         fws->fw_count++;
466         fws->fw_totlen += fwone->fws_len;
467
468         return 0;
469 }
470
/* On-wire layout of an IWM_UCODE_TLV_DEF_CALIB TLV payload */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian ucode image type */
	struct iwm_tlv_calib_ctrl calib; /* default calibration triggers */
} __packed;
475
476 static int
477 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
478 {
479         const struct iwm_tlv_calib_data *def_calib = data;
480         uint32_t ucode_type = le32toh(def_calib->ucode_type);
481
482         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
483                 device_printf(sc->sc_dev,
484                     "Wrong ucode_type %u for default "
485                     "calibration.\n", ucode_type);
486                 return EINVAL;
487         }
488
489         sc->sc_default_calib[ucode_type].flow_trigger =
490             def_calib->calib.flow_trigger;
491         sc->sc_default_calib[ucode_type].event_trigger =
492             def_calib->calib.event_trigger;
493
494         return 0;
495 }
496
497 static void
498 iwm_fw_info_free(struct iwm_fw_info *fw)
499 {
500         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
501         fw->fw_fp = NULL;
502         /* don't touch fw->fw_status */
503         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
504 }
505
506 static int
507 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
508 {
509         struct iwm_fw_info *fw = &sc->sc_fw;
510         const struct iwm_tlv_ucode_header *uhdr;
511         struct iwm_ucode_tlv tlv;
512         enum iwm_ucode_tlv_type tlv_type;
513         const struct firmware *fwp;
514         const uint8_t *data;
515         int error = 0;
516         size_t len;
517
518         if (fw->fw_status == IWM_FW_STATUS_DONE &&
519             ucode_type != IWM_UCODE_TYPE_INIT)
520                 return 0;
521
522         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
523 #if defined(__DragonFly__)
524                 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
525 #else
526                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
527 #endif
528         }
529         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
530
531         if (fw->fw_fp != NULL)
532                 iwm_fw_info_free(fw);
533
534         /*
535          * Load firmware into driver memory.
536          * fw_fp will be set.
537          */
538         IWM_UNLOCK(sc);
539         fwp = firmware_get(sc->sc_fwname);
540         if (fwp == NULL) {
541                 device_printf(sc->sc_dev,
542                     "could not read firmware %s (error %d)\n",
543                     sc->sc_fwname, error);
544                 IWM_LOCK(sc);
545                 goto out;
546         }
547         IWM_LOCK(sc);
548         fw->fw_fp = fwp;
549
550         /*
551          * Parse firmware contents
552          */
553
554         uhdr = fw->fw_fp->data;
555         if (*(const uint32_t *)fw->fw_fp->data != 0
556             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
557                 device_printf(sc->sc_dev, "invalid firmware %s\n",
558                     sc->sc_fwname);
559                 error = EINVAL;
560                 goto out;
561         }
562
563         sc->sc_fwver = le32toh(uhdr->ver);
564         data = uhdr->data;
565         len = fw->fw_fp->datasize - sizeof(*uhdr);
566
567         while (len >= sizeof(tlv)) {
568                 size_t tlv_len;
569                 const void *tlv_data;
570
571                 memcpy(&tlv, data, sizeof(tlv));
572                 tlv_len = le32toh(tlv.length);
573                 tlv_type = le32toh(tlv.type);
574
575                 len -= sizeof(tlv);
576                 data += sizeof(tlv);
577                 tlv_data = data;
578
579                 if (len < tlv_len) {
580                         device_printf(sc->sc_dev,
581                             "firmware too short: %zu bytes\n",
582                             len);
583                         error = EINVAL;
584                         goto parse_out;
585                 }
586
587                 switch ((int)tlv_type) {
588                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
589                         if (tlv_len < sizeof(uint32_t)) {
590                                 device_printf(sc->sc_dev,
591                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
592                                     __func__,
593                                     (int) tlv_len);
594                                 error = EINVAL;
595                                 goto parse_out;
596                         }
597                         sc->sc_capa_max_probe_len
598                             = le32toh(*(const uint32_t *)tlv_data);
599                         /* limit it to something sensible */
600                         if (sc->sc_capa_max_probe_len > (1<<16)) {
601                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
602                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
603                                     "ridiculous\n", __func__);
604                                 error = EINVAL;
605                                 goto parse_out;
606                         }
607                         break;
608                 case IWM_UCODE_TLV_PAN:
609                         if (tlv_len) {
610                                 device_printf(sc->sc_dev,
611                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
612                                     __func__,
613                                     (int) tlv_len);
614                                 error = EINVAL;
615                                 goto parse_out;
616                         }
617                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
618                         break;
619                 case IWM_UCODE_TLV_FLAGS:
620                         if (tlv_len < sizeof(uint32_t)) {
621                                 device_printf(sc->sc_dev,
622                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
623                                     __func__,
624                                     (int) tlv_len);
625                                 error = EINVAL;
626                                 goto parse_out;
627                         }
628                         /*
629                          * Apparently there can be many flags, but Linux driver
630                          * parses only the first one, and so do we.
631                          *
632                          * XXX: why does this override IWM_UCODE_TLV_PAN?
633                          * Intentional or a bug?  Observations from
634                          * current firmware file:
635                          *  1) TLV_PAN is parsed first
636                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
637                          * ==> this resets TLV_PAN to itself... hnnnk
638                          */
639                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
640                         break;
641                 case IWM_UCODE_TLV_CSCHEME:
642                         if ((error = iwm_store_cscheme(sc,
643                             tlv_data, tlv_len)) != 0) {
644                                 device_printf(sc->sc_dev,
645                                     "%s: iwm_store_cscheme(): returned %d\n",
646                                     __func__,
647                                     error);
648                                 goto parse_out;
649                         }
650                         break;
651                 case IWM_UCODE_TLV_NUM_OF_CPU:
652                         if (tlv_len != sizeof(uint32_t)) {
653                                 device_printf(sc->sc_dev,
654                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
655                                     __func__,
656                                     (int) tlv_len);
657                                 error = EINVAL;
658                                 goto parse_out;
659                         }
660                         if (le32toh(*(const uint32_t*)tlv_data) != 1) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: driver supports "
663                                     "only TLV_NUM_OF_CPU == 1",
664                                     __func__);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         break;
669                 case IWM_UCODE_TLV_SEC_RT:
670                         if ((error = iwm_firmware_store_section(sc,
671                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
674                                     __func__,
675                                     error);
676                                 goto parse_out;
677                         }
678                         break;
679                 case IWM_UCODE_TLV_SEC_INIT:
680                         if ((error = iwm_firmware_store_section(sc,
681                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
684                                     __func__,
685                                     error);
686                                 goto parse_out;
687                         }
688                         break;
689                 case IWM_UCODE_TLV_SEC_WOWLAN:
690                         if ((error = iwm_firmware_store_section(sc,
691                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
692                                 device_printf(sc->sc_dev,
693                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
694                                     __func__,
695                                     error);
696                                 goto parse_out;
697                         }
698                         break;
699                 case IWM_UCODE_TLV_DEF_CALIB:
700                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
701                                 device_printf(sc->sc_dev,
702                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
703                                     __func__,
704                                     (int) tlv_len,
705                                     (int) sizeof(struct iwm_tlv_calib_data));
706                                 error = EINVAL;
707                                 goto parse_out;
708                         }
709                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
710                                 device_printf(sc->sc_dev,
711                                     "%s: iwm_set_default_calib() failed: %d\n",
712                                     __func__,
713                                     error);
714                                 goto parse_out;
715                         }
716                         break;
717                 case IWM_UCODE_TLV_PHY_SKU:
718                         if (tlv_len != sizeof(uint32_t)) {
719                                 error = EINVAL;
720                                 device_printf(sc->sc_dev,
721                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
722                                     __func__,
723                                     (int) tlv_len);
724                                 goto parse_out;
725                         }
726                         sc->sc_fw_phy_config =
727                             le32toh(*(const uint32_t *)tlv_data);
728                         break;
729
730                 case IWM_UCODE_TLV_API_CHANGES_SET:
731                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
732                         /* ignore, not used by current driver */
733                         break;
734
735                 default:
736                         device_printf(sc->sc_dev,
737                             "%s: unknown firmware section %d, abort\n",
738                             __func__, tlv_type);
739                         error = EINVAL;
740                         goto parse_out;
741                 }
742
743                 len -= roundup(tlv_len, 4);
744                 data += roundup(tlv_len, 4);
745         }
746
747         KASSERT(error == 0, ("unhandled error"));
748
749  parse_out:
750         if (error) {
751                 device_printf(sc->sc_dev, "firmware parse error %d, "
752                     "section type %d\n", error, tlv_type);
753         }
754
755         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
756                 device_printf(sc->sc_dev,
757                     "device uses unsupported power ops\n");
758                 error = ENOTSUP;
759         }
760
761  out:
762         if (error) {
763                 fw->fw_status = IWM_FW_STATUS_NONE;
764                 if (fw->fw_fp != NULL)
765                         iwm_fw_info_free(fw);
766         } else
767                 fw->fw_status = IWM_FW_STATUS_DONE;
768         wakeup(&sc->sc_fw);
769
770         return error;
771 }
772
773 /*
774  * DMA resource routines
775  */
776
777 static void
778 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
779 {
780         if (error != 0)
781                 return;
782         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
783         *(bus_addr_t *)arg = segs[0].ds_addr;
784 }
785
/*
 * Allocate 'size' bytes of coherent, zeroed DMA memory with the given
 * alignment and describe it in 'dma': create a child tag, allocate and
 * map the memory, and record its physical address in dma->paddr via the
 * iwm_dma_map_addr() callback.
 *
 * Returns 0 on success.  On any failure the partial state is torn down
 * with iwm_dma_contig_free() and the bus_dma error is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->size = size;

#if defined(__DragonFly__)
	/*
	 * DragonFly's bus_dma_tag_create() takes fewer arguments than
	 * FreeBSD's (no trailing NULL, NULL pair before the tag pointer).
	 */
	error = bus_dma_tag_create(tag, alignment,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, size,
				   BUS_DMA_NOWAIT, &dma->tag);
#else
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
#endif
	if (error != 0)
		goto fail;

	/* One segment only; BUS_DMA_ZERO gives us pre-cleared memory. */
	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
830
/*
 * Release everything acquired by iwm_dma_contig_alloc().  Safe to call
 * on a partially-initialized iwm_dma_info (it is the error path of
 * iwm_dma_contig_alloc()); each resource is freed only if present and
 * the pointers are cleared so a second call is harmless.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
			dma->vaddr = NULL;
		}
		/*
		 * NOTE(review): on FreeBSD, bus_dmamem_free() already
		 * releases the map created by bus_dmamem_alloc(), so a
		 * following bus_dmamap_destroy() would be questionable
		 * there -- confirm DragonFly's bus_dma semantics before
		 * changing this.
		 */
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}

}
851
/*
 * fwmem is used to load firmware onto the card: a single DMA buffer of
 * sc_fwdmasegsz bytes that firmware sections are staged through.
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
860
/* Release the firmware staging buffer allocated by iwm_alloc_fwmem(). */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
866
867 /* tx scheduler rings.  not used? */
868 static int
869 iwm_alloc_sched(struct iwm_softc *sc)
870 {
871         int rv;
872
873         /* TX scheduler rings must be aligned on a 1KB boundary. */
874         rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
875             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
876         return rv;
877 }
878
/* Release the TX scheduler DMA area allocated by iwm_alloc_sched(). */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
884
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB page, 4KB-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
891
/* Release the keep-warm page allocated by iwm_alloc_kw(). */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
897
/*
 * interrupt cause table: DMA area the device writes interrupt causes
 * into when ICT mode is enabled (see iwm_ict_reset()).  Alignment must
 * match the paddr shift used when programming IWM_CSR_DRAM_INT_TBL_REG.
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
905
/* Release the interrupt cause table allocated by iwm_alloc_ict(). */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
911
/*
 * Allocate the RX ring: the descriptor array (one 32-bit DMA address
 * per entry), the RX status area, a DMA tag for receive buffers, and
 * the IWM_RX_RING_COUNT buffers themselves via iwm_rx_addbuf().
 *
 * Returns 0 on success; on failure the partially-built ring is torn
 * down with iwm_free_rx_ring() and the error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	/* DragonFly variant uses PAGE_SIZE alignment and no lock args. */
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
975
/*
 * Quiesce the RX path without freeing anything: stop RX DMA on the
 * device (if the NIC can be locked) and reset the software and shared
 * ring indices to zero so RX can be restarted cleanly.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
994
/*
 * Free all RX ring resources: descriptor and status DMA areas, any
 * mbufs still attached to ring slots (with their DMA maps), and the
 * RX buffer DMA tag.  Safe on a partially-constructed ring; it is the
 * error path of iwm_alloc_rx_ring().
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Unload before freeing the mbuf it maps. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1023
/*
 * Allocate TX ring 'qid': the TFD descriptor array for every ring and,
 * for rings up to and including the command queue, the per-slot device
 * command area, the TX buffer DMA tag, and one DMA map per slot.  Each
 * slot caches the physical addresses of its command and of the scratch
 * field inside the embedded iwm_tx_cmd.
 *
 * Returns 0 on success; on failure the ring is torn down with
 * iwm_free_tx_ring() and the error is returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

#if defined(__DragonFly__)
	/*
	 * NOTE(review): the DragonFly branch allows IWM_MAX_SCATTER - 2
	 * segments while the FreeBSD branch allows IWM_MAX_SCATTER - 1;
	 * unclear from here whether the difference is intentional --
	 * worth confirming against the TFD fill code.
	 */
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWM_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The loop must have walked exactly over the cmd area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1103
/*
 * Drop all pending transmissions on a TX ring: free queued mbufs,
 * zero the descriptors, clear the ring's bit in qfullmsk and reset
 * the software indices.  DMA maps and tags are kept for reuse
 * (contrast with iwm_free_tx_ring()).
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1128
/*
 * Free all TX ring resources: descriptor and command DMA areas, any
 * mbufs still attached to slots (with their maps), and the TX buffer
 * DMA tag.  Safe on a partially-constructed ring; it is the error
 * path of iwm_alloc_tx_ring().
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1157
1158 /*
1159  * High-level hardware frobbing routines
1160  */
1161
/*
 * Enable the full default interrupt set and remember the mask so
 * iwm_restore_interrupts() can re-program it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1168
/* Re-program the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1174
/*
 * Mask all device interrupts, then acknowledge anything already
 * pending in both the CSR and FH interrupt status registers so no
 * stale interrupt fires once they are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1185
/*
 * (Re-)initialize ICT interrupt handling: with interrupts disabled,
 * clear the ICT table, point the device at it, flag the driver as
 * using ICT mode, then ack and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1208
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 *
 * Full device shutdown sequence: mask interrupts, invalidate the
 * first vap's uploaded MAC context, stop the TX scheduler and all
 * FH DMA channels, reset the RX and TX rings, power down the DMA
 * clocks, stop the APM, and reset the on-board processor.  RF-kill
 * interrupt delivery is kept alive at the end so rfkill state
 * changes are still seen while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1296
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the hardware revision (MAC
 * step/dash) and the radio type/step/dash fields extracted from the
 * firmware's PHY config word, then apply the early-PCIe-power-off
 * workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack radio config fields from sc_fw_phy_config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1336
/*
 * Program the device's RX engine: clear the shared status area, stop
 * RX DMA, point the hardware at the descriptor ring and status area,
 * enable the RX channel, set interrupt coalescing, and publish an
 * initial write pointer of 8.
 *
 * Returns EBUSY if the NIC cannot be locked, otherwise 0.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1400
/*
 * Program the device's TX engine: deactivate the scheduler, set the
 * keep-warm page address, and point each hardware queue at its TX
 * descriptor ring.
 *
 * Returns EBUSY if the NIC cannot be locked, otherwise 0.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1432
/*
 * Bring the NIC to an initialized state: APM and power setup, NIC
 * config, then RX and TX engine initialization, and finally enable
 * shadow registers.  Returns 0 or the first error from the RX/TX
 * init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1458
/*
 * Hardware TX FIFO numbers used by the firmware scheduler.  BK..VO
 * correspond to the four EDCA access categories; 5 is the multicast
 * FIFO (4 is skipped -- numbering follows the iwlwifi driver this
 * code is based on).
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};
1466
/*
 * Map an access-category index to its hardware TX FIFO.
 *
 * NOTE(review): the table runs VO, VI, BE, BK, i.e. it assumes the
 * index ordering used by Linux mac80211 (VO first) rather than
 * net80211's WME_AC_BE..WME_AC_VO ordering -- confirm against the
 * callers that index this array.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1473
/*
 * Activate hardware TX queue 'qid' and attach it to TX FIFO 'fifo':
 * deactivate it first, configure chaining (non-command queues only),
 * clear aggregation, reset read/write pointers, program the scheduler
 * context (window size and frame limit) in SRAM, then mark the queue
 * active.  Errors acquiring the NIC lock are only logged (see XXX).
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		/* Enable queue chaining for regular (data) queues. */
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);
}
1521
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the firmware's scheduler SRAM base matches ours, reset ICT, clear
 * scheduler state in SRAM, program the scheduler ring base, enable
 * the command queue and all FH DMA channels, and enable L1-Active.
 *
 * Returns EBUSY if the NIC lock cannot be taken, EINVAL on a
 * scheduler address mismatch, or an iwm_write_mem() error.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch",
		    __func__);
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/*
	 * enable command channel.
	 * FIFO 7 -- presumably the command FIFO; no symbolic constant
	 * for it is visible here.
	 */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1579
1580 /*
1581  * NVM read access and content parsing.  We do not support
1582  * external NVM or writing NVM.
1583  * iwlwifi/mvm/nvm.c
1584  */
1585
/*
 * List of NVM sections we are allowed/need to read, in the order they
 * are fetched by iwm_nvm_init().
 */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};
1593
/* Default NVM chunk size requested per IWM_NVM_ACCESS_CMD read. */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Maximum NVM section size (unused in this part of the file). */
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* op_code values for struct iwm_nvm_access_cmd. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1600
1601 static int
1602 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1603         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1604 {
1605         offset = 0;
1606         struct iwm_nvm_access_cmd nvm_access_cmd = {
1607                 .offset = htole16(offset),
1608                 .length = htole16(length),
1609                 .type = htole16(section),
1610                 .op_code = IWM_NVM_READ_OPCODE,
1611         };
1612         struct iwm_nvm_access_resp *nvm_resp;
1613         struct iwm_rx_packet *pkt;
1614         struct iwm_host_cmd cmd = {
1615                 .id = IWM_NVM_ACCESS_CMD,
1616                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1617                     IWM_CMD_SEND_IN_RFKILL,
1618                 .data = { &nvm_access_cmd, },
1619         };
1620         int ret, bytes_read, offset_read;
1621         uint8_t *resp_data;
1622
1623         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1624
1625         ret = iwm_send_cmd(sc, &cmd);
1626         if (ret)
1627                 return ret;
1628
1629         pkt = cmd.resp_pkt;
1630         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1631                 device_printf(sc->sc_dev,
1632                     "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1633                     __func__, pkt->hdr.flags);
1634                 ret = EIO;
1635                 goto exit;
1636         }
1637
1638         /* Extract NVM response */
1639         nvm_resp = (void *)pkt->data;
1640
1641         ret = le16toh(nvm_resp->status);
1642         bytes_read = le16toh(nvm_resp->length);
1643         offset_read = le16toh(nvm_resp->offset);
1644         resp_data = nvm_resp->data;
1645         if (ret) {
1646                 device_printf(sc->sc_dev,
1647                     "%s: NVM access command failed with status %d\n",
1648                     __func__, ret);
1649                 ret = EINVAL;
1650                 goto exit;
1651         }
1652
1653         if (offset_read != offset) {
1654                 device_printf(sc->sc_dev,
1655                     "%s: NVM ACCESS response with invalid offset %d\n",
1656                     __func__, offset_read);
1657                 ret = EINVAL;
1658                 goto exit;
1659         }
1660
1661         memcpy(data + offset, resp_data, bytes_read);
1662         *len = bytes_read;
1663
1664  exit:
1665         iwm_free_resp(sc, &cmd);
1666         return ret;
1667 }
1668
1669 /*
1670  * Reads an NVM section completely.
1671  * NICs prior to 7000 family doesn't have a real NVM, but just read
1672  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1673  * by uCode, we need to manually check in this case that we don't
1674  * overflow and try to read more than the EEPROM size.
1675  * For 7000 family NICs, we supply the maximal size we can read, and
1676  * the uCode fills the response with as much data as we can,
1677  * without overflowing, so no check is needed.
1678  */
1679 static int
1680 iwm_nvm_read_section(struct iwm_softc *sc,
1681         uint16_t section, uint8_t *data, uint16_t *len)
1682 {
1683         uint16_t length, seglen;
1684         int error;
1685
1686         /* Set nvm section read length */
1687         length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1688         *len = 0;
1689
1690         /* Read the NVM until exhausted (reading less than requested) */
1691         while (seglen == length) {
1692                 error = iwm_nvm_read_chunk(sc,
1693                     section, *len, length, data, &seglen);
1694                 if (error) {
1695                         device_printf(sc->sc_dev,
1696                             "Cannot read NVM from section "
1697                             "%d offset %d, length %d\n",
1698                             section, *len, length);
1699                         return error;
1700                 }
1701                 *len += seglen;
1702         }
1703
1704         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1705             "NVM section %d read completed\n", section);
1706         return 0;
1707 }
1708
1709 /*
1710  * BEGIN IWM_NVM_PARSE
1711  */
1712
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/*
	 * NVM SW-Section offset (in words) definitions.  The entries
	 * below IWM_NVM_SW_SECTION are relative to the start of the SW
	 * section (they index the 'nvm_sw' pointer in
	 * iwm_parse_nvm_data()).
	 */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions,
	 * relative to the start of the calibration section. */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1730
/* SKU Capabilities (actual values from NVM definition).
 * These are bits of the IWM_SKU word decoded in iwm_parse_nvm_data(). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1738
/* radio config bits (actual values from NVM definition);
 * field extractors for the IWM_RADIO_CFG word. */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* NOTE(review): presumably dBm — unused in this part of the file. */
#define DEFAULT_MAX_TX_POWER 16
1748
1749 /**
1750  * enum iwm_nvm_channel_flags - channel flags in NVM
1751  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1752  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1753  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1754  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1755  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1756  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1757  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1758  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1759  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1760  */
1761 enum iwm_nvm_channel_flags {
1762         IWM_NVM_CHANNEL_VALID = (1 << 0),
1763         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1764         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1765         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1766         IWM_NVM_CHANNEL_DFS = (1 << 7),
1767         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1768         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1769         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1770         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1771 };
1772
1773 /*
1774  * Add a channel to the net80211 channel list.
1775  *
1776  * ieee is the ieee channel number
1777  * ch_idx is channel index.
1778  * mode is the channel mode - CHAN_A, CHAN_B, CHAN_G.
1779  * ch_flags is the iwm channel flags.
1780  *
1781  * Return 0 on OK, < 0 on error.
1782  */
1783 static int
1784 iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
1785     int mode, uint16_t ch_flags)
1786 {
1787         /* XXX for now, no overflow checking! */
1788         struct ieee80211com *ic =  sc->sc_ic;
1789         int is_5ghz, flags;
1790         struct ieee80211_channel *channel;
1791
1792         channel = &ic->ic_channels[ic->ic_nchans++];
1793         channel->ic_ieee = ieee;
1794
1795         is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
1796         if (!is_5ghz) {
1797                 flags = IEEE80211_CHAN_2GHZ;
1798                 channel->ic_flags = mode;
1799         } else {
1800                 flags = IEEE80211_CHAN_5GHZ;
1801                 channel->ic_flags = mode;
1802         }
1803         channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
1804
1805         if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
1806                 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
1807         return (0);
1808 }
1809
1810 static void
1811 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
1812 {
1813         struct ieee80211com *ic =  sc->sc_ic;
1814         struct iwm_nvm_data *data = &sc->sc_nvm;
1815         int ch_idx;
1816         uint16_t ch_flags;
1817         int hw_value;
1818
1819         for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
1820                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1821
1822                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
1823                     !data->sku_cap_band_52GHz_enable)
1824                         ch_flags &= ~IWM_NVM_CHANNEL_VALID;
1825
1826                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1827                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1828                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1829                             iwm_nvm_channels[ch_idx],
1830                             ch_flags,
1831                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1832                             "5.2" : "2.4");
1833                         continue;
1834                 }
1835
1836                 hw_value = iwm_nvm_channels[ch_idx];
1837
1838                 /* 5GHz? */
1839                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
1840                         (void) iwm_init_net80211_channel(sc, hw_value,
1841                             ch_idx,
1842                             IEEE80211_CHAN_A,
1843                             ch_flags);
1844                 } else {
1845                         (void) iwm_init_net80211_channel(sc, hw_value,
1846                             ch_idx,
1847                             IEEE80211_CHAN_B,
1848                             ch_flags);
1849                         /* If it's not channel 13, also add 11g */
1850                         if (hw_value != 13)
1851                                 (void) iwm_init_net80211_channel(sc, hw_value,
1852                                     ch_idx,
1853                                     IEEE80211_CHAN_G,
1854                                     ch_flags);
1855                 }
1856
1857                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1858                     "Ch. %d Flags %x [%sGHz] - Added\n",
1859                     iwm_nvm_channels[ch_idx],
1860                     ch_flags,
1861                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1862                     "5.2" : "2.4");
1863         }
1864         ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1865 }
1866
/*
 * Parse the HW, SW and calibration NVM sections into sc->sc_nvm.
 * Returns 0 on success or EINVAL when the radio-config word reports
 * no valid TX or RX antennas.
 *
 * NOTE(review): the tx_chains/rx_chains parameters are currently
 * unused here — confirm whether they were meant to mask the
 * NVM-reported antenna bits.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[IEEE80211_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Split the radio-config word into its subfields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	/* Band capabilities come from the SKU word; 11n stays disabled. */
	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = 0;

	/* A radio with no usable antennas is a hard error. */
	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		device_printf(sc->sc_dev,
		    "%s: invalid antennas (0x%x, 0x%x)\n",
		    __func__, data->valid_tx_ant,
		    data->valid_rx_ant);
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* NOTE(review): copied without le16toh, unlike the other NVM
	 * words above — confirm whether that is intentional. */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365:
	 * swap each byte pair to recover the MAC address. */
	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
1922
1923 /*
1924  * END NVM PARSE
1925  */
1926
/* One NVM section as read from the firmware; 'data' points at a
 * kmalloc'd copy made by iwm_nvm_init(). */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};
1931
1932 static int
1933 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1934 {
1935         const uint16_t *hw, *sw, *calib;
1936
1937         /* Checking for required sections */
1938         if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1939             !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1940                 device_printf(sc->sc_dev,
1941                     "%s: Can't parse empty NVM sections\n",
1942                     __func__);
1943                 return ENOENT;
1944         }
1945
1946         hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1947         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1948         calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1949         return iwm_parse_nvm_data(sc, hw, sw, calib,
1950             IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1951 }
1952
1953 static int
1954 iwm_nvm_init(struct iwm_softc *sc)
1955 {
1956         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1957         int i, section, error;
1958         uint16_t len;
1959         uint8_t *nvm_buffer, *temp;
1960
1961         /* Read From FW NVM */
1962         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1963             "%s: Read NVM\n",
1964             __func__);
1965
1966         /* TODO: find correct NVM max size for a section */
1967         nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1968         if (nvm_buffer == NULL)
1969                 return (ENOMEM);
1970         for (i = 0; i < nitems(nvm_to_read); i++) {
1971                 section = nvm_to_read[i];
1972                 KASSERT(section <= nitems(nvm_sections),
1973                     ("too many sections"));
1974
1975                 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1976                 if (error)
1977                         break;
1978
1979                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1980                 if (temp == NULL) {
1981                         error = ENOMEM;
1982                         break;
1983                 }
1984                 memcpy(temp, nvm_buffer, len);
1985                 nvm_sections[section].data = temp;
1986                 nvm_sections[section].length = len;
1987         }
1988         kfree(nvm_buffer, M_DEVBUF);
1989         if (error)
1990                 return error;
1991
1992         return iwm_parse_nvm_sections(sc, nvm_sections);
1993 }
1994
1995 /*
1996  * Firmware loading gunk.  This is kind of a weird hybrid between the
1997  * iwn driver and the Linux iwlwifi driver.
1998  */
1999
/*
 * DMA one firmware chunk to the device at 'dst_addr' using the FH
 * service channel, then sleep until the chunk-done wakeup on &sc->sc_fw.
 * Returns 0 on success, EBUSY if the NIC lock cannot be taken, or the
 * sleep error (e.g. EWOULDBLOCK after the 1s timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, program destination, source buffer and
	 * length, then re-enable it; the write order matters. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	error = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
#else
		error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
#endif
		if (error)
			break;
	}

	return error;
}
2050
/*
 * Push every section of the requested ucode image to the device, then
 * release the CPU from reset and wait (up to 10 * hz/10 ticks) for the
 * alive interrupt to set sc_uc.uc_intr.  Returns 0 on success or the
 * first chunk/sleep error.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int error, i, w;
	const void *data;
	uint32_t dlen;
	uint32_t offset;

	sc->sc_uc.uc_intr = 0;

	/* Transfer each section of the image in turn. */
	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
		    "LOAD FIRMWARE type %d offset %u len %d\n",
		    ucode_type, offset, dlen);
		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: chunk %u of %u returned error %02d\n",
			    __func__, i, fws->fw_count, error);
			return error;
		}
	}

	/* wait for the firmware to load */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
#if defined(__DragonFly__)
		error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
#else
		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
#endif
	}

	return error;
}
2092
/*
 * Prepare the NIC and load the given ucode image: init the hardware,
 * clear the rfkill handshake bits, enable host interrupts and hand off
 * to iwm_load_firmware().  The repeated rfkill clears are deliberate —
 * see the inline comments.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack any pending interrupts before touching the hardware. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		device_printf(sc->sc_dev, "unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2122
/*
 * Called once the firmware has reported alive; finishes scheduler setup
 * via iwm_post_alive().  The sched_base argument is currently unused —
 * iwm_post_alive() reads sc->sched_base directly.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2128
2129 static int
2130 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2131 {
2132         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2133                 .valid = htole32(valid_tx_ant),
2134         };
2135
2136         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2137             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2138 }
2139
2140 static int
2141 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2142 {
2143         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2144         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2145
2146         /* Set parameters */
2147         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2148         phy_cfg_cmd.calib_control.event_trigger =
2149             sc->sc_default_calib[ucode_type].event_trigger;
2150         phy_cfg_cmd.calib_control.flow_trigger =
2151             sc->sc_default_calib[ucode_type].flow_trigger;
2152
2153         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2154             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2155         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2156             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2157 }
2158
2159 static int
2160 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2161         enum iwm_ucode_type ucode_type)
2162 {
2163         enum iwm_ucode_type old_type = sc->sc_uc_current;
2164         int error;
2165
2166         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2167                 kprintf("iwm_read_firmweare: failed %d\n",
2168                         error);
2169                 return error;
2170         }
2171
2172         sc->sc_uc_current = ucode_type;
2173         error = iwm_start_fw(sc, ucode_type);
2174         if (error) {
2175                 kprintf("iwm_start_fw: failed %d\n", error);
2176                 sc->sc_uc_current = old_type;
2177                 return error;
2178         }
2179
2180         error = iwm_fw_alive(sc, sc->sched_base);
2181         if (error) {
2182                 kprintf("iwm_fw_alive: failed %d\n", error);
2183         }
2184         return error;
2185 }
2186
2187 /*
2188  * mvm misc bits
2189  */
2190
2191 static int
2192 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2193 {
2194         int error;
2195
2196         /* do not operate with rfkill switch turned on */
2197         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2198                 device_printf(sc->sc_dev,
2199                     "radio is disabled by hardware switch\n");
2200                 return EPERM;
2201         }
2202
2203         sc->sc_init_complete = 0;
2204         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2205             IWM_UCODE_TYPE_INIT)) != 0) {
2206                 kprintf("iwm_mvm_load_ucode_wait_alive: failed %d\n",
2207                         error);
2208                 return error;
2209         }
2210
2211         if (justnvm) {
2212                 if ((error = iwm_nvm_init(sc)) != 0) {
2213                         device_printf(sc->sc_dev, "failed to read nvm\n");
2214                         return error;
2215                 }
2216                 IEEE80211_ADDR_COPY(sc->sc_bssid, sc->sc_nvm.hw_addr);
2217
2218                 sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2219                     + sc->sc_capa_max_probe_len
2220                     + IWM_MAX_NUM_SCAN_CHANNELS
2221                     * sizeof(struct iwm_scan_channel);
2222                 sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
2223                     M_INTWAIT);
2224                 if (sc->sc_scan_cmd == NULL)
2225                         return (ENOMEM);
2226
2227                 return 0;
2228         }
2229
2230         /* Send TX valid antennas before triggering calibrations */
2231         if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
2232                 kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
2233                 return error;
2234         }
2235
2236         /*
2237         * Send phy configurations command to init uCode
2238         * to start the 16.0 uCode init image internal calibrations.
2239         */
2240         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2241                 device_printf(sc->sc_dev,
2242                     "%s: failed to run internal calibration: %d\n",
2243                     __func__, error);
2244                 return error;
2245         }
2246
2247         /*
2248          * Nothing to do but wait for the init complete notification
2249          * from the firmware
2250          */
2251         while (!sc->sc_init_complete) {
2252 #if defined(__DragonFly__)
2253                 error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
2254                                  0, "iwminit", 2*hz);
2255 #else
2256                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2257                                  0, "iwminit", 2*hz);
2258 #endif
2259                 if (error) {
2260                         kprintf("init complete failed %d\n",
2261                                 sc->sc_init_complete);
2262                         break;
2263                 }
2264         }
2265
2266         return error;
2267 }
2268
2269 /*
2270  * receive side
2271  */
2272
2273 /* (re)stock rx ring, called at init-time and at runtime */
2274 static int
2275 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2276 {
2277         struct iwm_rx_ring *ring = &sc->rxq;
2278         struct iwm_rx_data *data = &ring->data[idx];
2279         struct mbuf *m;
2280         int error;
2281         bus_addr_t paddr;
2282
2283         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2284         if (m == NULL)
2285                 return ENOBUFS;
2286
2287         if (data->m != NULL)
2288                 bus_dmamap_unload(ring->data_dmat, data->map);
2289
2290         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2291         error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2292         if (error != 0) {
2293                 device_printf(sc->sc_dev,
2294                     "%s: could not create RX buf DMA map, error %d\n",
2295                     __func__, error);
2296                 goto fail;
2297         }
2298         data->m = m;
2299         error = bus_dmamap_load(ring->data_dmat, data->map,
2300             mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2301             &paddr, BUS_DMA_NOWAIT);
2302         if (error != 0 && error != EFBIG) {
2303                 device_printf(sc->sc_dev,
2304                     "%s: can't not map mbuf, error %d\n", __func__,
2305                     error);
2306                 goto fail;
2307         }
2308         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2309
2310         /* Update RX descriptor. */
2311         KKASSERT((paddr & 255) == 0);
2312         ring->desc[idx] = htole32(paddr >> 8);
2313         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2314             BUS_DMASYNC_PREWRITE);
2315
2316         return 0;
2317 fail:
2318         return error;
2319 }
2320
2321 #define IWM_RSSI_OFFSET 50
2322 static int
2323 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2324 {
2325         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2326         uint32_t agc_a, agc_b;
2327         uint32_t val;
2328
2329         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2330         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2331         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2332
2333         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2334         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2335         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2336
2337         /*
2338          * dBm = rssi dB - agc dB - constant.
2339          * Higher AGC (higher radio gain) means lower signal.
2340          */
2341         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2342         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2343         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2344
2345         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2346             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2347             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2348
2349         return max_rssi_dbm;
2350 }
2351
2352 /*
2353  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2354  * values are reported by the fw as positive values - need to negate
2355  * to obtain their dBM.  Account for missing antennas by replacing 0
2356  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2357  */
2358 static int
2359 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2360 {
2361         int energy_a, energy_b, energy_c, max_energy;
2362         uint32_t val;
2363
2364         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2365         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2366             IWM_RX_INFO_ENERGY_ANT_A_POS;
2367         energy_a = energy_a ? -energy_a : -256;
2368         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2369             IWM_RX_INFO_ENERGY_ANT_B_POS;
2370         energy_b = energy_b ? -energy_b : -256;
2371         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2372             IWM_RX_INFO_ENERGY_ANT_C_POS;
2373         energy_c = energy_c ? -energy_c : -256;
2374         max_energy = MAX(energy_a, energy_b);
2375         max_energy = MAX(max_energy, energy_c);
2376
2377         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2378             "energy In A %d B %d C %d , and max %d\n",
2379             energy_a, energy_b, energy_c, max_energy);
2380
2381         return max_energy;
2382 }
2383
2384 static void
2385 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2386         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2387 {
2388         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2389
2390         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2391         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2392
2393         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2394 }
2395
2396 /*
2397  * Retrieve the average noise (in dBm) among receivers.
2398  */
2399 static int
2400 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2401 {
2402         int i, total, nbant, noise;
2403
2404         total = nbant = noise = 0;
2405         for (i = 0; i < 3; i++) {
2406                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2407                 if (noise) {
2408                         total += noise;
2409                         nbant++;
2410                 }
2411         }
2412
2413         /* There should be at least one antenna but check anyway. */
2414         return (nbant == 0) ? -127 : (total / nbant) - 107;
2415 }
2416
2417 /*
2418  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2419  *
2420  * Handles the actual data of the Rx packet from the fw
2421  */
2422 static void
2423 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2424         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2425 {
2426         struct ieee80211com *ic = sc->sc_ic;
2427         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2428         struct ieee80211_frame *wh;
2429         struct ieee80211_node *ni;
2430         struct ieee80211_rx_stats rxs;
2431         struct mbuf *m;
2432         struct iwm_rx_phy_info *phy_info;
2433         struct iwm_rx_mpdu_res_start *rx_res;
2434         uint32_t len;
2435         uint32_t rx_pkt_status;
2436         int rssi;
2437
2438         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2439
2440         phy_info = &sc->sc_last_phy_info;
2441         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2442         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2443         len = le16toh(rx_res->byte_count);
2444         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2445
2446         m = data->m;
2447         m->m_data = pkt->data + sizeof(*rx_res);
2448         m->m_pkthdr.len = m->m_len = len;
2449
2450         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2451                 device_printf(sc->sc_dev,
2452                     "dsp size out of range [0,20]: %d\n",
2453                     phy_info->cfg_phy_cnt);
2454                 return;
2455         }
2456
2457         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2458             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2459                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2460                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2461                 return; /* drop */
2462         }
2463
2464         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2465                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2466         } else {
2467                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2468         }
2469         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2470         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2471
2472         /* replenish ring for the buffer we're going to feed to the sharks */
2473         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2474                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2475                     __func__);
2476                 return;
2477         }
2478
2479         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2480
2481         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2482             "%s: phy_info: channel=%d, flags=0x%08x\n",
2483             __func__,
2484             le16toh(phy_info->channel),
2485             le16toh(phy_info->phy_flags));
2486
2487         /*
2488          * Populate an RX state struct with the provided information.
2489          */
2490         bzero(&rxs, sizeof(rxs));
2491         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2492         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2493         rxs.c_ieee = le16toh(phy_info->channel);
2494         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2495                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2496         } else {
2497                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2498         }
2499         rxs.rssi = rssi - sc->sc_noise;
2500         rxs.nf = sc->sc_noise;
2501
2502         if (ieee80211_radiotap_active_vap(vap)) {
2503                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2504
2505                 tap->wr_flags = 0;
2506                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2507                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2508                 tap->wr_chan_freq = htole16(rxs.c_freq);
2509                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2510                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2511                 tap->wr_dbm_antsignal = (int8_t)rssi;
2512                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2513                 tap->wr_tsft = phy_info->system_timestamp;
2514                 switch (phy_info->rate) {
2515                 /* CCK rates. */
2516                 case  10: tap->wr_rate =   2; break;
2517                 case  20: tap->wr_rate =   4; break;
2518                 case  55: tap->wr_rate =  11; break;
2519                 case 110: tap->wr_rate =  22; break;
2520                 /* OFDM rates. */
2521                 case 0xd: tap->wr_rate =  12; break;
2522                 case 0xf: tap->wr_rate =  18; break;
2523                 case 0x5: tap->wr_rate =  24; break;
2524                 case 0x7: tap->wr_rate =  36; break;
2525                 case 0x9: tap->wr_rate =  48; break;
2526                 case 0xb: tap->wr_rate =  72; break;
2527                 case 0x1: tap->wr_rate =  96; break;
2528                 case 0x3: tap->wr_rate = 108; break;
2529                 /* Unknown rate: should not happen. */
2530                 default:  tap->wr_rate =   0;
2531                 }
2532         }
2533
2534         IWM_UNLOCK(sc);
2535         if (ni != NULL) {
2536                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2537                 ieee80211_input_mimo(ni, m, &rxs);
2538                 ieee80211_free_node(ni);
2539         } else {
2540                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2541                 ieee80211_input_mimo_all(ic, m, &rxs);
2542         }
2543         IWM_LOCK(sc);
2544 }
2545
2546 static void
2547 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2548         struct iwm_node *in)
2549 {
2550         struct ifnet *ifp = sc->sc_ifp;
2551         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2552         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2553         int failack = tx_resp->failure_frame;
2554
2555         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2556
2557         /* Update rate control statistics. */
2558         if (status != IWM_TX_STATUS_SUCCESS &&
2559             status != IWM_TX_STATUS_DIRECT_DONE) {
2560 #if defined(__DragonFly__)
2561                 ++ifp->if_oerrors;
2562 #else
2563                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2564 #endif
2565                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2566                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2567
2568         } else {
2569 #if defined(__DragonFly__)
2570                 ++ifp->if_opackets;
2571 #else
2572                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2573 #endif
2574                 ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2575                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2576         }
2577 }
2578
/*
 * TX completion handler: look up the originating TX descriptor by
 * (queue, index) from the firmware response, run single-frame
 * completion accounting, release the DMA map/mbuf/node, and restart
 * the transmit path if the ring drained below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate completions for the same slot. */
	if (txd->done) {
		device_printf(sc->sc_dev,
		    "%s: got tx interrupt that's already been handled!\n",
		    __func__);
		return;
	}
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the TX watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);
	m_freem(txd->m);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	KASSERT(txd->done == 0, ("txd not done"));
	txd->done = 1;
	KASSERT(txd->in, ("txd without node"));

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken when the frame was queued. */
	ieee80211_free_node((struct ieee80211_node *)in);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/*
		 * If no queue is full anymore, clear the "output active"
		 * backpressure and kick the transmit path again.
		 */
#if defined(__DragonFly__)
		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
#else
		/*
		 * NOTE(review): the FreeBSD branch tests IFF_DRV_OACTIVE in
		 * if_flags; stock FreeBSD keeps that bit in if_drv_flags —
		 * confirm (this branch is not compiled on DragonFly).
		 */
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_DRV_OACTIVE)) {
			ifp->if_flags &= ~IFF_DRV_OACTIVE;
#endif
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			iwm_start_locked(ifp);
		}
	}
}
2636
2637 /*
2638  * transmit side
2639  */
2640
2641 /*
2642  * Process a "command done" firmware notification.  This is where we wakeup
2643  * processes waiting for a synchronous command completion.
2644  * from if_iwn
2645  */
2646 static void
2647 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2648 {
2649         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2650         struct iwm_tx_data *data;
2651
2652         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2653                 return; /* Not a command ack. */
2654         }
2655
2656         data = &ring->data[pkt->hdr.idx];
2657
2658         /* If the command was mapped in an mbuf, free it. */
2659         if (data->m != NULL) {
2660                 bus_dmamap_sync(ring->data_dmat, data->map,
2661                     BUS_DMASYNC_POSTWRITE);
2662                 bus_dmamap_unload(ring->data_dmat, data->map);
2663                 m_freem(data->m);
2664                 data->m = NULL;
2665         }
2666         wakeup(&ring->desc[pkt->hdr.idx]);
2667 }
2668
#if 0
/*
 * necessary only for block ack mode
 *
 * Writes the byte count for a queued frame into the firmware's
 * scheduler byte-count table so the hardware scheduler knows the
 * TFD's payload size.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	/* Newer firmware expects the length in dwords, not bytes. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry format: station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * NOTE(review): presumably a duplicated shadow entry the hardware
	 * reads when the ring index wraps — confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
2701
2702 /*
2703  * Take an 802.11 (non-n) rate, find the relevant rate
2704  * table entry.  return the index into in_ridx[].
2705  *
2706  * The caller then uses that index back into in_ridx
2707  * to figure out the rate index programmed /into/
2708  * the firmware for this given node.
2709  */
2710 static int
2711 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2712     uint8_t rate)
2713 {
2714         int i;
2715         uint8_t r;
2716
2717         for (i = 0; i < nitems(in->in_ridx); i++) {
2718                 r = iwm_rates[in->in_ridx[i]].rate;
2719                 if (rate == r)
2720                         return (i);
2721         }
2722         /* XXX Return the first */
2723         /* XXX TODO: have it return the /lowest/ */
2724         return (0);
2725 }
2726
2727 /*
2728  * Fill in various bit for management frames, and leave them
2729  * unfilled for data frames (firmware takes care of that).
2730  * Return the selected TX rate.
2731  */
2732 static const struct iwm_rate *
2733 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2734         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2735 {
2736         struct ieee80211com *ic = sc->sc_ic;
2737         struct ieee80211_node *ni = &in->in_ni;
2738         const struct iwm_rate *rinfo;
2739         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2740         int ridx, rate_flags;
2741
2742         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2743         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2744
2745         /*
2746          * XXX TODO: everything about the rate selection here is terrible!
2747          */
2748
2749         if (type == IEEE80211_FC0_TYPE_DATA) {
2750                 int i;
2751                 /* for data frames, use RS table */
2752                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2753                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2754                 ridx = in->in_ridx[i];
2755
2756                 /* This is the index into the programmed table */
2757                 tx->initial_rate_index = i;
2758                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2759                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2760                     "%s: start with i=%d, txrate %d\n",
2761                     __func__, i, iwm_rates[ridx].rate);
2762                 /* XXX no rate_n_flags? */
2763                 return &iwm_rates[ridx];
2764         }
2765
2766         /*
2767          * For non-data, use the lowest supported rate for the given
2768          * operational mode.
2769          *
2770          * Note: there may not be any rate control information available.
2771          * This driver currently assumes if we're transmitting data
2772          * frames, use the rate control table.  Grr.
2773          *
2774          * XXX TODO: use the configured rate for the traffic type!
2775          */
2776         if (ic->ic_curmode == IEEE80211_MODE_11A) {
2777                 /*
2778                  * XXX this assumes the mode is either 11a or not 11a;
2779                  * definitely won't work for 11n.
2780                  */
2781                 ridx = IWM_RIDX_OFDM;
2782         } else {
2783                 ridx = IWM_RIDX_CCK;
2784         }
2785
2786         rinfo = &iwm_rates[ridx];
2787
2788         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2789             __func__, ridx,
2790             rinfo->rate,
2791             !! (IWM_RIDX_IS_CCK(ridx))
2792             );
2793
2794         /* XXX TODO: hard-coded TX antenna? */
2795         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2796         if (IWM_RIDX_IS_CCK(ridx))
2797                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
2798         /* XXX hard-coded tx rate */
2799         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
2800
2801         return rinfo;
2802 }
2803
#define TB0_SIZE 16
/*
 * Queue one frame on TX ring 'ac': build the firmware TX command
 * (encryption, flags, rate, padded 802.11 header), DMA-map the
 * payload, fill the TFD with the scatter segments and kick the ring.
 * Returns 0 on success or an errno; the mbuf is always consumed on
 * failure.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and set rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	/* Request an ACK for anything that isn't multicast. */
	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): RTS/CTS protection is requested here for *non-data*
	 * frames above the RTS threshold; other iwm/iwlwifi code applies
	 * this to data frames — confirm the intended condition.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association needs a longer power-management timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error && error != EFBIG) {
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", error);
		m_freem(m);
		return error;
	}
	if (error) {
		/* Too many DMA segments, linearize mbuf. */
		/*
		 * NOTE(review): a truthy m_defrag() result is treated as
		 * failure here; on platforms where m_defrag() returns the
		 * new mbuf on success this looks inverted — confirm against
		 * the platform's m_defrag() contract.
		 */
		if (m_defrag(m, M_NOWAIT)) {
			m_freem(m);
			return ENOBUFS;
		}
#if defined(__DragonFly__)
		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
						    segs, IWM_MAX_SCATTER - 2,
						    &nsegs, BUS_DMA_NOWAIT);
#else
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
		if (error) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, nsegs);

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command header + TX command + 802.11 header. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor writes to memory. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3020
3021 static int
3022 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3023     const struct ieee80211_bpf_params *params)
3024 {
3025         struct ieee80211com *ic = ni->ni_ic;
3026         struct ifnet *ifp = ic->ic_ifp;
3027         struct iwm_softc *sc = ic->ic_softc;
3028         int error = 0;
3029
3030         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3031             "->%s begin\n", __func__);
3032
3033 #if defined(__DragonFly__)
3034         if ((ifp->if_flags & IFF_RUNNING) == 0) {
3035 #else
3036         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3037 #endif
3038                 ieee80211_free_node(ni);
3039                 m_freem(m);
3040                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3041                     "<-%s not RUNNING\n", __func__);
3042                 return (ENETDOWN);
3043         }
3044
3045         IWM_LOCK(sc);
3046         /* XXX fix this */
3047         if (params == NULL) {
3048                 error = iwm_tx(sc, m, ni, 0);
3049         } else {
3050                 error = iwm_tx(sc, m, ni, 0);
3051         }
3052         if (error != 0) {
3053                 /* NB: m is reclaimed on tx failure */
3054                 ieee80211_free_node(ni);
3055 #if defined(__DragonFly__)
3056                 ++ifp->if_oerrors;
3057 #else
3058                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3059 #endif
3060         }
3061         sc->sc_tx_timer = 5;
3062         IWM_UNLOCK(sc);
3063
3064         return (error);
3065 }
3066
3067 /*
3068  * mvm/tx.c
3069  */
3070
#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 *
 * Asks the firmware to flush the TX FIFOs selected by 'tfd_msk';
 * 'sync' selects a blocking vs. fire-and-forget command.  Currently
 * compiled out.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
3098
3099 static void
3100 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
3101         struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
3102 {
3103         memset(cmd_v5, 0, sizeof(*cmd_v5));
3104
3105         cmd_v5->add_modify = cmd_v6->add_modify;
3106         cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
3107         cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
3108         IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
3109         cmd_v5->sta_id = cmd_v6->sta_id;
3110         cmd_v5->modify_mask = cmd_v6->modify_mask;
3111         cmd_v5->station_flags = cmd_v6->station_flags;
3112         cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
3113         cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
3114         cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
3115         cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
3116         cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
3117         cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
3118         cmd_v5->assoc_id = cmd_v6->assoc_id;
3119         cmd_v5->beamform_flags = cmd_v6->beamform_flags;
3120         cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
3121 }
3122
3123 static int
3124 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3125         struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3126 {
3127         struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3128
3129         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3130                 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3131                     sizeof(*cmd), cmd, status);
3132         }
3133
3134         iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3135
3136         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3137             &cmd_v5, status);
3138 }
3139
3140 /* send station add/update command to firmware */
3141 static int
3142 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3143 {
3144         struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3145         int ret;
3146         uint32_t status;
3147
3148         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3149
3150         add_sta_cmd.sta_id = IWM_STATION_ID;
3151         add_sta_cmd.mac_id_n_color
3152             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3153                 IWM_DEFAULT_COLOR));
3154         if (!update) {
3155                 add_sta_cmd.tfd_queue_msk = htole32(0xf);
3156                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3157         }
3158         add_sta_cmd.add_modify = update ? 1 : 0;
3159         add_sta_cmd.station_flags_msk
3160             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3161
3162         status = IWM_ADD_STA_SUCCESS;
3163         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3164         if (ret)
3165                 return ret;
3166
3167         switch (status) {
3168         case IWM_ADD_STA_SUCCESS:
3169                 break;
3170         default:
3171                 ret = EIO;
3172                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3173                 break;
3174         }
3175
3176         return ret;
3177 }
3178
/* Add the (single) firmware station for the current BSS. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* update == 0: this is an initial add, not a modify. */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3190
/* Modify an already-added firmware station. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int err;

	/* update == 1: modify the existing station entry. */
	err = iwm_mvm_sta_send_to_fw(sc, in, 1);
	return err;
}
3196
3197 static int
3198 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3199         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3200 {
3201         struct iwm_mvm_add_sta_cmd_v6 cmd;
3202         int ret;
3203         uint32_t status;
3204
3205         memset(&cmd, 0, sizeof(cmd));
3206         cmd.sta_id = sta->sta_id;
3207         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3208
3209         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3210
3211         if (addr)
3212                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3213
3214         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3215         if (ret)
3216                 return ret;
3217
3218         switch (status) {
3219         case IWM_ADD_STA_SUCCESS:
3220                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3221                     "%s: Internal station added.\n", __func__);
3222                 return 0;
3223         default:
3224                 device_printf(sc->sc_dev,
3225                     "%s: Add internal station failed, status=0x%x\n",
3226                     __func__, status);
3227                 ret = EIO;
3228                 break;
3229         }
3230         return ret;
3231 }
3232
3233 static int
3234 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3235 {
3236         int ret;
3237
3238         sc->sc_aux_sta.sta_id = 3;
3239         sc->sc_aux_sta.tfd_queue_msk = 0;
3240
3241         ret = iwm_mvm_add_int_sta_common(sc,
3242             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3243
3244         if (ret)
3245                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3246         return ret;
3247 }
3248
3249 static int
3250 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3251 {
3252         struct iwm_time_quota_cmd cmd;
3253         int i, idx, ret, num_active_macs, quota, quota_rem;
3254         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3255         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3256         uint16_t id;
3257
3258         memset(&cmd, 0, sizeof(cmd));
3259
3260         /* currently, PHY ID == binding ID */
3261         if (in) {
3262                 id = in->in_phyctxt->id;
3263                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3264                 colors[id] = in->in_phyctxt->color;
3265
3266                 if (1)
3267                         n_ifs[id] = 1;
3268         }
3269
3270         /*
3271          * The FW's scheduling session consists of
3272          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3273          * equally between all the bindings that require quota
3274          */
3275         num_active_macs = 0;
3276         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3277                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3278                 num_active_macs += n_ifs[i];
3279         }
3280
3281         quota = 0;
3282         quota_rem = 0;
3283         if (num_active_macs) {
3284                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3285                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3286         }
3287
3288         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3289                 if (colors[i] < 0)
3290                         continue;
3291
3292                 cmd.quotas[idx].id_and_color =
3293                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3294
3295                 if (n_ifs[i] <= 0) {
3296                         cmd.quotas[idx].quota = htole32(0);
3297                         cmd.quotas[idx].max_duration = htole32(0);
3298                 } else {
3299                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3300                         cmd.quotas[idx].max_duration = htole32(0);
3301                 }
3302                 idx++;
3303         }
3304
3305         /* Give the remainder of the session to the first binding */
3306         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3307
3308         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3309             sizeof(cmd), &cmd);
3310         if (ret)
3311                 device_printf(sc->sc_dev,
3312                     "%s: Failed to send quota: %d\n", __func__, ret);
3313         return ret;
3314 }
3315
3316 /*
3317  * ieee80211 routines
3318  */
3319
3320 /*
3321  * Change to AUTH state in 80211 state machine.  Roughly matches what
3322  * Linux does in bss_info_changed().
3323  */
3324 static int
3325 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3326 {
3327         struct ieee80211_node *ni;
3328         struct iwm_node *in;
3329         struct iwm_vap *iv = IWM_VAP(vap);
3330         uint32_t duration;
3331         uint32_t min_duration;
3332         int error;
3333
3334         /*
3335          * XXX i have a feeling that the vap node is being
3336          * freed from underneath us. Grr.
3337          */
3338         ni = ieee80211_ref_node(vap->iv_bss);
3339         in = (struct iwm_node *) ni;
3340         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3341             "%s: called; vap=%p, bss ni=%p\n",
3342             __func__,
3343             vap,
3344             ni);
3345
3346         in->in_assoc = 0;
3347
3348         error = iwm_allow_mcast(vap, sc);
3349         if (error) {
3350                 device_printf(sc->sc_dev,
3351                     "%s: failed to set multicast\n", __func__);
3352                 goto out;
3353         }
3354
3355         /*
3356          * This is where it deviates from what Linux does.
3357          *
3358          * Linux iwlwifi doesn't reset the nic each time, nor does it
3359          * call ctxt_add() here.  Instead, it adds it during vap creation,
3360          * and always does does a mac_ctx_changed().
3361          *
3362          * The openbsd port doesn't attempt to do that - it reset things
3363          * at odd states and does the add here.
3364          *
3365          * So, until the state handling is fixed (ie, we never reset
3366          * the NIC except for a firmware failure, which should drag
3367          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3368          * contexts that are required), let's do a dirty hack here.
3369          */
3370         if (iv->is_uploaded) {
3371                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3372                         device_printf(sc->sc_dev,
3373                             "%s: failed to add MAC\n", __func__);
3374                         goto out;
3375                 }
3376         } else {
3377                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3378                         device_printf(sc->sc_dev,
3379                             "%s: failed to add MAC\n", __func__);
3380                         goto out;
3381                 }
3382         }
3383
3384         if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3385             in->in_ni.ni_chan, 1, 1)) != 0) {
3386                 device_printf(sc->sc_dev,
3387                     "%s: failed add phy ctxt\n", __func__);
3388                 goto out;
3389         }
3390         in->in_phyctxt = &sc->sc_phyctxt[0];
3391
3392         if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3393                 device_printf(sc->sc_dev,
3394                     "%s: binding cmd\n", __func__);
3395                 goto out;
3396         }
3397
3398         if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3399                 device_printf(sc->sc_dev,
3400                     "%s: failed to add MAC\n", __func__);
3401                 goto out;
3402         }
3403
3404         /* a bit superfluous? */
3405         while (sc->sc_auth_prot) {
3406 #if defined(__DragonFly__)
3407                 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmauth", 0);
3408 #else
3409                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
3410 #endif
3411         }
3412         sc->sc_auth_prot = 1;
3413
3414         duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
3415             200 + in->in_ni.ni_intval);
3416         min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
3417             100 + in->in_ni.ni_intval);
3418         iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
3419
3420         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3421             "%s: waiting for auth_prot\n", __func__);
3422         while (sc->sc_auth_prot != 2) {
3423                 /*
3424                  * well, meh, but if the kernel is sleeping for half a
3425                  * second, we have bigger problems
3426                  */
3427                 if (sc->sc_auth_prot == 0) {
3428                         device_printf(sc->sc_dev,
3429                             "%s: missed auth window!\n", __func__);
3430                         error = ETIMEDOUT;
3431                         goto out;
3432                 } else if (sc->sc_auth_prot == -1) {
3433                         device_printf(sc->sc_dev,
3434                             "%s: no time event, denied!\n", __func__);
3435                         sc->sc_auth_prot = 0;
3436                         error = EAUTH;
3437                         goto out;
3438                 }
3439 #if defined(__DragonFly__)
3440                 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmau2", 0);
3441 #else
3442                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
3443 #endif
3444         }
3445         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
3446         error = 0;
3447 out:
3448         ieee80211_free_node(ni);
3449         return (error);
3450 }
3451
3452 static int
3453 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3454 {
3455         struct iwm_node *in = (struct iwm_node *)vap->iv_bss;
3456         int error;
3457
3458         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3459                 device_printf(sc->sc_dev,
3460                     "%s: failed to update STA\n", __func__);
3461                 return error;
3462         }
3463
3464         in->in_assoc = 1;
3465         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3466                 device_printf(sc->sc_dev,
3467                     "%s: failed to update MAC\n", __func__);
3468                 return error;
3469         }
3470
3471         return 0;
3472 }
3473
/*
 * Tear down the RUN-state association by fully resetting and
 * re-initializing the device.  See the comment below for why the
 * "proper" incremental teardown isn't used.  Returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Reference code for the incremental teardown, kept for when the
	 * freeze above is understood.  NOTE(review): it calls
	 * iwm_mvm_rm_sta() twice - verify before re-enabling. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
3531
/*
 * net80211 node-allocation callback.  struct iwm_node embeds
 * struct ieee80211_node as its first member, so the zeroed
 * allocation is returned as the generic node pointer.
 */
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
	    M_INTWAIT | M_ZERO);
}
3538
/*
 * Build the link-quality (rate selection) command for a node.
 *
 * Maps the node's negotiated legacy rate set into hardware rate
 * indices (in->in_ridx[], highest rate first) and fills in->in_lq's
 * rs_table with the corresponding PLCP/antenna words.  The command
 * itself is sent by the caller (see IEEE80211_S_RUN in iwm_newstate()).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* ni_rates is sorted ascending; walk it backwards. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No match; in_ridx[i] stays -1 for this slot. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
3650
/*
 * ifmedia change callback: let net80211 process the change, and if it
 * requires a reset (ENETRESET) restart the interface when it is up
 * and running.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	int error;

	error = ieee80211_media_change(ifp);
	/* Anything other than ENETRESET is a real error (or 0). */
	if (error != ENETRESET)
		return error;

#if defined(__DragonFly__)
	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_flags & IFF_RUNNING)) {
#else
	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
#endif
		iwm_stop(ifp, 0);
		iwm_init(sc);
	}
	return error;
}
3673
3674
/*
 * net80211 state-machine transition handler.
 *
 * Swaps the net80211 lock for the driver lock, performs the
 * firmware-side work for the new state (auth/assoc/run setup,
 * teardown on leaving RUN), then chains to the saved net80211
 * iv_newstate with the net80211 lock re-held.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = (void *)vap->iv_bss) != NULL))
			in->in_assoc = 0;

		/* Full device reset; see iwm_release() for why. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			/* net80211 lock must be held to call iv_newstate. */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = (struct iwm_node *)vap->iv_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Send the rate table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
3798
/*
 * Task-queue callback run when a scan completes.  If only the 2GHz
 * band was scanned and the NVM says 5GHz is available, kick off a
 * 5GHz scan; otherwise report scan completion to net80211.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = sc->sc_ic;
	int done;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	IWM_LOCK(sc);
	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
		done = 0;
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
			device_printf(sc->sc_dev, "could not initiate scan\n");
			done = 1;
		}
	} else {
		done = 1;
	}

	if (done) {
		/* Drop the driver lock while calling into net80211. */
		IWM_UNLOCK(sc);
		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
		IWM_LOCK(sc);
		sc->sc_scanband = 0;
	}
	IWM_UNLOCK(sc);
}
3832
/*
 * Bring the device and firmware fully up: run the INIT firmware image
 * for calibration, restart the hardware, load the regular runtime
 * firmware, then push antenna/PHY configuration, the aux station,
 * PHY contexts and power settings, and enable the TX queues.
 * Returns 0 on success; on failure the device is stopped.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_db_data failed\n");
		goto error;
	}

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 * NOTE(review): index 1, not 0 - looks inherited from
		 * OpenBSD's 1-based channel array; verify for this stack.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
3914
/*
 * Allow multicast from our BSSID: send a pass-all multicast filter
 * command for the current BSS.  Returns 0 or an errno.
 */
static int
iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwm_mcast_filter_cmd *cmd;
	size_t size;
	int error;

	/* Firmware commands must be 4-byte aligned in size. */
	size = roundup(sizeof(*cmd), 4);
	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
	if (cmd == NULL)
		return ENOMEM;
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;	/* no explicit address list; pass_all covers it */
	cmd->pass_all = 1;
	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);

	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
	    IWM_CMD_SYNC, size, cmd);
	kfree(cmd, M_DEVBUF);

	return (error);
}
3940
3941 /*
3942  * ifnet interfaces
3943  */
3944
/*
 * Interface init entry point: take the driver lock and run the
 * locked initialization path.
 */
static void
iwm_init(void *arg)
{
	struct iwm_softc *sc = arg;

	IWM_LOCK(sc);
	iwm_init_locked(sc);
	IWM_UNLOCK(sc);
}
3954
/*
 * Bring the interface up (driver lock held).  No-op if already
 * initialized; on hardware init failure the interface is stopped.
 * On success, marks the interface running and starts the watchdog.
 */
static void
iwm_init_locked(struct iwm_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
		return;
	}
	/* New generation invalidates callbacks from the previous life. */
	sc->sc_generation++;
	sc->sc_flags &= ~IWM_FLAG_STOPPED;

	if ((error = iwm_init_hw(sc)) != 0) {
		kprintf("iwm_init_hw failed %d\n", error);
		iwm_stop_locked(ifp);
		return;
	}

	/*
	 * Ok, firmware loaded and we are jogging
	 */
#if defined(__DragonFly__)
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags |= IFF_RUNNING;
#else
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
#endif
	sc->sc_flags |= IWM_FLAG_HW_INITED;
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
3986
3987 /*
3988  * Dequeue packets from sendq and call send.
3989  * mostly from iwn
3990  */
/*
 * Dequeue packets from sendq and call send.
 * mostly from iwn
 *
 * Transmit start entry point: take the driver lock and drain the
 * interface send queue via iwm_start_locked().
 */
#if defined(__DragonFly__)
static void
iwm_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
#else
static void
iwm_start(struct ifnet *ifp)
#endif
{
	struct iwm_softc *sc = ifp->if_softc;

	IWM_LOCK(sc);
	iwm_start_locked(ifp);
	IWM_UNLOCK(sc);
}
4005
/*
 * Drain the interface send queue (driver lock held): pop mbufs and
 * hand them to iwm_tx() until the queue is empty or the hardware TX
 * rings fill up (qfullmsk), in which case the queue is marked
 * output-active and draining stops.
 */
static void
iwm_start_locked(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;

#if defined(__DragonFly__)
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		ifq_purge(&ifp->if_snd);
	if (ifq_is_oactive(&ifp->if_snd) || (ifp->if_flags & IFF_RUNNING) == 0)
		return;
#else
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
		return;
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
#if defined(__DragonFly__)
			ifq_set_oactive(&ifp->if_snd);
#else
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#endif
			break;
		}
		m = ifq_dequeue(&ifp->if_snd);
		if (!m)
			break;
		/* net80211 stashes the destination node in rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
#if defined(__DragonFly__)
			++ifp->if_oerrors;
#else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif
			continue;
		}

		/* Arm the TX watchdog (seconds). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
		}
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
4055
/*
 * Interface stop entry point: take the driver lock and run the
 * locked stop path.  'disable' is currently unused.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;

	IWM_LOCK(sc);
	iwm_stop_locked(ifp);
	IWM_UNLOCK(sc);
}
4065
/*
 * Stop the interface (driver lock held): clear software state,
 * mark the interface down, cancel LED blinking and the TX watchdog,
 * and power the device off.
 */
static void
iwm_stop_locked(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Bump the generation so stale deferred work is ignored. */
	sc->sc_generation++;
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
#if defined(__DragonFly__)
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;
#else
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#endif
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
4086
/*
 * Per-second watchdog callout.  Counts down sc_tx_timer (armed by the
 * transmit path); when it hits zero the device is assumed wedged:
 * dump the firmware error log (IWM_DEBUG builds), mark the interface
 * down, stop it, and count an output error.  Re-arms itself for one
 * second unless the timeout fired.
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

#if defined(__DragonFly__)
#else
	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
#endif
	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			ifp->if_flags &= ~IFF_UP;
			/*
			 * NOTE(review): iwm_stop_locked() is called from
			 * callout context here; this assumes the callout
			 * runs with the IWM lock held (e.g. initialized
			 * via callout_init_mtx) -- confirm at the
			 * callout_reset()/callout_init() site.
			 */
			iwm_stop_locked(ifp);
#if defined(__DragonFly__)
			++ifp->if_oerrors;
#else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif
			/* Timed out: do not re-arm the watchdog. */
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
4115
/*
 * Interface ioctl handler.  Only SIOCGIFADDR, SIOCGIFMEDIA and
 * SIOCSIFFLAGS are handled; anything else returns EINVAL.
 * SIOCSIFFLAGS brings the device up or down (under the IWM lock) so
 * the running state tracks the administrative IFF_UP flag.
 */
#if defined(__DragonFly__)
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data, struct ucred *cred)
#else
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
#endif
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = sc->sc_ic;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCSIFFLAGS:
		IWM_LOCK(sc);
#if defined(__DragonFly__)
		/* DragonFly keeps the running state in if_flags. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				iwm_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop_locked(ifp);
		}
#else
		/* FreeBSD keeps the running state in if_drv_flags. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwm_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwm_stop_locked(ifp);
		}
#endif
		IWM_UNLOCK(sc);
		/* Kick the vaps only after the driver lock is dropped. */
		if (startall)
			ieee80211_start_all(ic);

		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}
4171
4172 /*
4173  * The interrupt side of things
4174  */
4175
4176 /*
4177  * error dumping routines are from iwlwifi/mvm/utils.c
4178  */
4179
4180 /*
4181  * Note: This structure is read from the device with IO accesses,
4182  * and the reading already does the endian conversion. As it is
4183  * read with uint32_t-sized accesses, any members with a different size
4184  * need to be ordered correctly though!
4185  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
4232
/* Layout constants used when parsing the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4235
4236 #ifdef IWM_DEBUG
4237 struct {
4238         const char *name;
4239         uint8_t num;
4240 } advanced_lookup[] = {
4241         { "NMI_INTERRUPT_WDG", 0x34 },
4242         { "SYSASSERT", 0x35 },
4243         { "UCODE_VERSION_MISMATCH", 0x37 },
4244         { "BAD_COMMAND", 0x38 },
4245         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4246         { "FATAL_ERROR", 0x3D },
4247         { "NMI_TRM_HW_ERR", 0x46 },
4248         { "NMI_INTERRUPT_TRM", 0x4C },
4249         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4250         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4251         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4252         { "NMI_INTERRUPT_HOST", 0x66 },
4253         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4254         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4255         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4256         { "ADVANCED_SYSASSERT", 0 },
4257 };
4258
4259 static const char *
4260 iwm_desc_lookup(uint32_t num)
4261 {
4262         int i;
4263
4264         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4265                 if (advanced_lookup[i].num == num)
4266                         return advanced_lookup[i].name;
4267
4268         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4269         return advanced_lookup[i].name;
4270 }
4271
4272 /*
4273  * Support for dumping the error log seemed like a good idea ...
4274  * but it's mostly hex junk and the only sensible thing is the
4275  * hw/ucode revision (which we know anyway).  Since it's here,
4276  * I'll just leave it in, just in case e.g. the Intel guys want to
4277  * help us decipher some "ADVANCED_SYSASSERT" later.
4278  */
/*
 * Dump the firmware error log to the console.  Reads the error event
 * table out of device memory at the address the firmware reported in
 * its ALIVE response (sc_uc.uc_error_event_table) and prints each
 * field.  Bails out if the pointer is outside the expected range or
 * the table has not been populated (table.valid == 0).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/*
	 * NOTE(review): 0x800000-0x80C000 is presumably the device
	 * SRAM window the error table must live in -- confirm against
	 * the firmware interface documentation.
	 */
	if (base < 0x800000 || base >= 0x80C000) {
		device_printf(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
4344 #endif
4345
/*
 * Sync the current rx buffer for CPU access and point _var_ at the
 * payload that follows the iwm_rx_packet header.  Relies on the
 * enclosing function's "ring" and "data" locals.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
do {                                                                    \
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_var_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

/*
 * Same as SYNC_RESP_STRUCT but for a pointer destination; note the
 * _len_ argument is currently unused.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
do {                                                                    \
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
	_ptr_ = (void *)((_pkt_)+1);                                    \
} while (/*CONSTCOND*/0)

/* Advance the rx ring consumer index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4359
4360 /*
4361  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4362  * Basic structure from if_iwn
4363  */
/*
 * Drain the rx ring, dispatching each firmware response/notification
 * to its handler.  "hw" is the firmware's closed receive-buffer index
 * from the rx status area; we process entries until our consumer
 * index (sc->rxq.cur) catches up, then write the updated pointer back
 * to the device.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-originated packets; see below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211com *ic = sc->sc_ic;
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			SYNC_RESP_STRUCT(resp, pkt);
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MVM_ALIVE: {
			/* Firmware boot response: record the table pointers. */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			/* Wake whoever is sleeping on firmware load. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Stash the raw response for the waiting command. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			/* Generic command responses: copy packet + status. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer end-of-scan processing to the taskqueue. */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "%s: time event notification auth_prot=%d\n",
				__func__, sc->sc_auth_prot);

			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			device_printf(sc->sc_dev,
			    "cmd %04x frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n",
			    pkt->hdr.code, qid, idx,
			    pkt->len_n_flags);
			panic("unhandled command");
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
4610
/*
 * Main interrupt handler.  Gathers the interrupt cause either from
 * the ICT table (IWM_FLAG_USE_ICT) or directly from the CSR
 * registers, then dispatches: firmware SW/HW errors (stop the
 * device), firmware-chunk-loaded, rfkill, periodic-rx, and the rx
 * path via iwm_notif_intr().  Interrupts are masked on entry and
 * restored via iwm_restore_interrupts() on the normal exit path.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	int handled = 0;
	/* NOTE(review): rv is written below but never read. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a late interrupt after detach unmapped the BAR. */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() applied to a value read from
		 * DMA memory; le32toh() was presumably intended (the
		 * two are identical on little-endian hosts) -- confirm.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the entry */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;
		struct ieee80211com *ic = sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", vap->iv_state);
#endif

		device_printf(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop_locked(ifp);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop_locked(ifp);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			ifp->if_flags &= ~IFF_UP;
			iwm_stop_locked(ifp);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
4767
4768 /*
4769  * Autoconf glue-sniffing
4770  */
4771 #define PCI_VENDOR_INTEL                0x8086
4772 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
4773 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
4774 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
4775 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
4776 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
4777 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
4778
4779 static const struct iwm_devices {
4780         uint16_t        device;
4781         const char      *name;
4782 } iwm_devices[] = {
4783         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4784         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4785         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4786         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4787         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4788         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
4789 };
4790
4791 static int
4792 iwm_probe(device_t dev)
4793 {
4794         int i;
4795
4796         for (i = 0; i < nitems(iwm_devices); i++) {
4797                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4798                     pci_get_device(dev) == iwm_devices[i].device) {
4799                         device_set_desc(dev, iwm_devices[i].name);
4800                         return (BUS_PROBE_DEFAULT);
4801                 }
4802         }
4803
4804         return (ENXIO);
4805 }
4806
4807 static int
4808 iwm_dev_check(device_t dev)
4809 {
4810         struct iwm_softc *sc;
4811
4812         sc = device_get_softc(dev);
4813
4814         switch (pci_get_device(dev)) {
4815         case PCI_PRODUCT_INTEL_WL_3160_1:
4816         case PCI_PRODUCT_INTEL_WL_3160_2:
4817                 sc->sc_fwname = "iwm3160fw";
4818                 sc->host_interrupt_operation_mode = 1;
4819                 return (0);
4820         case PCI_PRODUCT_INTEL_WL_7260_1:
4821         case PCI_PRODUCT_INTEL_WL_7260_2:
4822                 sc->sc_fwname = "iwm7260fw";
4823                 sc->host_interrupt_operation_mode = 1;
4824                 return (0);
4825         case PCI_PRODUCT_INTEL_WL_7265_1:
4826         case PCI_PRODUCT_INTEL_WL_7265_2:
4827                 sc->sc_fwname = "iwm7265fw";
4828                 sc->host_interrupt_operation_mode = 0;
4829                 return (0);
4830         default:
4831                 device_printf(dev, "unknown adapter type\n");
4832                 return ENXIO;
4833         }
4834 }
4835
/*
 * Low-level PCI attach: map BAR0 (the chip's CSR register window),
 * allocate an MSI or shared legacy interrupt, and install the interrupt
 * handler.  Resources acquired here are released by iwm_pci_detach(),
 * which the attach-failure path reaches via iwm_detach_local().
 */
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	/* Map the register memory BAR. */
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	/* Prefer MSI (rid 1); fall back to a shareable legacy line (rid 0). */
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
			return (ENXIO);
	}
#if defined(__DragonFly__)
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
			       iwm_intr, sc, &sc->sc_ih,
			       &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	/*
	 * NOTE(review): 'error' from bus_setup_intr() is not examined;
	 * sc_ih being NULL is used as the failure indicator instead.
	 */
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
			return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
4906
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, then release
 * the IRQ and memory-BAR resources.  Each resource is NULL-checked so
 * this is safe after a partially failed attach.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
#if defined(__DragonFly__)
		/* Clear the pointer so a repeated detach is harmless. */
		sc->sc_irq = NULL;
#endif
	}
	if (sc->sc_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
#if defined(__DragonFly__)
		sc->sc_mem = NULL;
#endif
	}
}
4929
4930
4931
/*
 * Newbus attach method.  Sets up locks, the task queue, PCI resources,
 * DMA memory (firmware, keep-warm, ICT, scheduler, TX/RX rings), the
 * ifnet, and the basic net80211 state.  The firmware-dependent half of
 * initialization is deferred to iwm_preinit() via a config intrhook,
 * because firmware loading needs interrupts enabled.
 * On any failure, iwm_detach_local(sc, 0) tears down whatever was set
 * up so far (0 = net80211 not yet attached).
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc;
	struct ieee80211com *ic;
	struct ifnet *ifp;
	int error;
	int txq_i, i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	/* The watchdog callout runs with the softc lock held. */
#if defined(__DragonFly__)
	lockinit(&sc->sc_lk, "iwm_lk", 0, 0);
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	mtx_init(&sc->sc_mtx, "iwm_mtx", MTX_DEF, 0);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	/* Single-threaded driver task queue. */
	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
#if defined(__DragonFly__)
	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
					-1, "iwm_taskq");
#else
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
#endif
	if (error != 0) {
		device_printf(dev, "can't start threads, error %d\n",
		    error);
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous firmware response is outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/* Create and initialize the ifnet that fronts this device. */
	sc->sc_ifp = ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, "iwm", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
#if defined(__DragonFly__)
	ifp->if_nmbjclusters = IWM_RX_RING_COUNT;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
#else
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	/*
	 * Set it here so we can initialise net80211.
	 * But, if we fail before we call net80211_ifattach(),
	 * we can't just call iwm_detach() or it'll free
	 * net80211 without it having been setup.
	 */
	sc->sc_ic = ic = ifp->if_l2com;
	ic->ic_ifp = ifp;
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Reset the PHY-context bookkeeping. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware-dependent setup until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
5108
5109 static int
5110 iwm_update_edca(struct ieee80211com *ic)
5111 {
5112         struct iwm_softc *sc = ic->ic_softc;
5113
5114         device_printf(sc->sc_dev, "%s: called\n", __func__);
5115         return (0);
5116 }
5117
/*
 * Deferred half of attach, run from the config intrhook once interrupts
 * are enabled: start the hardware, run the init firmware once (to read
 * the NVM and firmware version), then attach net80211 and install the
 * driver's ic method overrides.  On failure the whole device is torn
 * down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once, then park the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "revision 0x%x, firmware %d.%d (API ver. %d)\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver));

	/* not all hardware can do 5GHz band */
	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	wlan_serialize_enter();
	ieee80211_ifattach(ic, sc->sc_bssid);
	wlan_serialize_exit();
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
5186
5187 /*
5188  * Attach the interface to 802.11 radiotap.
5189  */
5190 static void
5191 iwm_radiotap_attach(struct iwm_softc *sc)
5192 {
5193         struct ifnet *ifp = sc->sc_ifp;
5194         struct ieee80211com *ic = ifp->if_l2com;
5195
5196         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5197             "->%s begin\n", __func__);
5198         ieee80211_radiotap_attach(ic,
5199             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5200                 IWM_TX_RADIOTAP_PRESENT,
5201             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5202                 IWM_RX_RADIOTAP_PRESENT);
5203         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5204             "->%s end\n", __func__);
5205 }
5206
5207 static struct ieee80211vap *
5208 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5209     enum ieee80211_opmode opmode, int flags,
5210     const uint8_t bssid[IEEE80211_ADDR_LEN],
5211     const uint8_t mac[IEEE80211_ADDR_LEN])
5212 {
5213         struct iwm_vap *ivp;
5214         struct ieee80211vap *vap;
5215         uint8_t mac1[IEEE80211_ADDR_LEN];
5216
5217         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5218                 return NULL;
5219         IEEE80211_ADDR_COPY(mac1, mac);
5220         ivp = (struct iwm_vap *) kmalloc(sizeof(struct iwm_vap),
5221                                         M_80211_VAP, M_INTWAIT | M_ZERO);
5222         vap = &ivp->iv_vap;
5223         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
5224         IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
5225         vap->iv_bmissthreshold = 10;            /* override default */
5226         /* Override with driver methods. */
5227         ivp->iv_newstate = vap->iv_newstate;
5228         vap->iv_newstate = iwm_newstate;
5229
5230         ieee80211_ratectl_init(vap);
5231         /* Complete setup. */
5232         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status);
5233         ic->ic_opmode = opmode;
5234
5235         return vap;
5236 }
5237
5238 static void
5239 iwm_vap_delete(struct ieee80211vap *vap)
5240 {
5241         struct iwm_vap *ivp = IWM_VAP(vap);
5242
5243         ieee80211_ratectl_deinit(vap);
5244         ieee80211_vap_detach(vap);
5245         kfree(ivp, M_80211_VAP);
5246 }
5247
5248 static void
5249 iwm_scan_start(struct ieee80211com *ic)
5250 {
5251         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5252         struct iwm_softc *sc = ic->ic_softc;
5253         int error;
5254
5255         if (sc->sc_scanband)
5256                 return;
5257         IWM_LOCK(sc);
5258         error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5259         if (error) {
5260                 device_printf(sc->sc_dev, "could not initiate scan\n");
5261                 IWM_UNLOCK(sc);
5262                 wlan_serialize_enter();
5263                 ieee80211_cancel_scan(vap);
5264                 wlan_serialize_exit();
5265         } else {
5266                 iwm_led_blink_start(sc);
5267                 IWM_UNLOCK(sc);
5268         }
5269 }
5270
5271 static void
5272 iwm_scan_end(struct ieee80211com *ic)
5273 {
5274         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5275         struct iwm_softc *sc = ic->ic_softc;
5276
5277         IWM_LOCK(sc);
5278         iwm_led_blink_stop(sc);
5279         if (vap->iv_state == IEEE80211_S_RUN)
5280                 iwm_mvm_led_enable(sc);
5281         IWM_UNLOCK(sc);
5282 }
5283
/* Multicast filter update: intentionally a no-op for this driver. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
5288
/*
 * Channel-change callback: no-op here; channel programming is
 * presumably done via firmware commands elsewhere — TODO confirm.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
5293
/* Per-channel scan dwell callback: intentionally a no-op. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
5298
/* Minimum-dwell scan callback: intentionally a no-op. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
5304
5305 void
5306 iwm_init_task(void *arg1)
5307 {
5308         struct iwm_softc *sc = arg1;
5309         struct ifnet *ifp = sc->sc_ifp;
5310
5311         IWM_LOCK(sc);
5312         while (sc->sc_flags & IWM_FLAG_BUSY) {
5313 #if defined(__DragonFly__)
5314                 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5315 #else
5316                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5317 #endif
5318 }
5319         sc->sc_flags |= IWM_FLAG_BUSY;
5320         iwm_stop_locked(ifp);
5321 #if defined(__DragonFly__)
5322         if ((ifp->if_flags & IFF_UP) &&
5323             (ifp->if_flags & IFF_RUNNING))
5324 #else
5325         if ((ifp->if_flags & IFF_UP) &&
5326             (ifp->if_drv_flags & IFF_DRV_RUNNING))
5327 #endif
5328                 iwm_init(sc);
5329         sc->sc_flags &= ~IWM_FLAG_BUSY;
5330         wakeup(&sc->sc_flags);
5331         IWM_UNLOCK(sc);
5332 }
5333
5334 static int
5335 iwm_resume(device_t dev)
5336 {
5337         uint16_t reg;
5338
5339         /* Clear device-specific "PCI retry timeout" register (41h). */
5340         reg = pci_read_config(dev, 0x40, sizeof(reg));
5341         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5342         iwm_init_task(device_get_softc(dev));
5343
5344         return 0;
5345 }
5346
/*
 * Suspend handler: if the interface is currently running, bring the
 * hardware down cleanly before the system sleeps.
 */
static int
iwm_suspend(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#if defined(__DragonFly__)
	if (ifp->if_flags & IFF_RUNNING)
#else
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
#endif
		iwm_stop(ifp, 0);

	return (0);
}
5362
/*
 * Common teardown for both the detach method and attach-failure paths.
 * 'do_net80211' is zero when net80211 was never attached (early attach
 * failure), in which case ieee80211_ifdetach() must be skipped.  Every
 * resource is checked before being freed, so this is safe to call with
 * a partially initialized softc.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (sc->sc_tq) {
#if defined(__DragonFly__)
		/* doesn't exist for DFly, DFly drains tasks on free */
#else
		taskqueue_drain_all(sc->sc_tq);
#endif
		taskqueue_free(sc->sc_tq);
#if defined(__DragonFly__)
		sc->sc_tq = NULL;
#endif
	}
	if (ifp) {
		callout_drain(&sc->sc_watchdog_to);
		ic = sc->sc_ic;
		/* Quiesce the hardware before tearing down the stack. */
		iwm_stop_device(sc);
		if (ic && do_net80211) {
			wlan_serialize_enter();
			ieee80211_ifdetach(ic);
			wlan_serialize_exit();
		}
		if_free(ifp);
#if defined(__DragonFly__)
		sc->sc_ifp = NULL;
#endif
	}
	callout_drain(&sc->sc_led_blink_to);

	/* Free descriptor rings */
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* free scheduler */
	iwm_free_sched(sc);
	/* DMA areas are only freed if they were actually allocated. */
	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
	if (sc->kw_dma.vaddr != NULL)
		iwm_free_kw(sc);
	if (sc->fw_dma.vaddr != NULL)
		iwm_free_fwmem(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	lockuninit(&sc->sc_lk);

	return (0);
}
5423
5424 static int
5425 iwm_detach(device_t dev)
5426 {
5427         struct iwm_softc *sc = device_get_softc(dev);
5428         int error;
5429
5430         error = iwm_detach_local(sc, 1);
5431
5432         return error;
5433 }
5434
/* Newbus method table: probe/attach/detach plus power-management hooks. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

/* Driver description registered on the pci bus. */
static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
/* Module dependencies: firmware loader, pci bus, and the wlan stack. */
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);