if_iwm - GC some dead code, left by a partially applied OpenBSD change.
sys/dev/netif/iwm/if_iwm.c (dragonfly.git)
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, this port does not include
109  *       the FreeBSD changes that remove the per-device network interface
110  *       (DragonFly has not caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/mutex.h>
147 #include <sys/module.h>
148 #include <sys/proc.h>
149 #include <sys/rman.h>
150 #include <sys/socket.h>
151 #include <sys/sockio.h>
152 #include <sys/sysctl.h>
153 #include <sys/linker.h>
154
155 #include <machine/endian.h>
156
157 #include <bus/pci/pcivar.h>
158 #include <bus/pci/pcireg.h>
159
160 #include <net/bpf.h>
161
162 #include <net/if.h>
163 #include <net/if_var.h>
164 #include <net/if_arp.h>
165 #include <net/if_dl.h>
166 #include <net/if_media.h>
167 #include <net/if_types.h>
168
169 #include <netinet/in.h>
170 #include <netinet/in_systm.h>
171 #include <netinet/if_ether.h>
172 #include <netinet/ip.h>
173
174 #include <netproto/802_11/ieee80211_var.h>
175 #include <netproto/802_11/ieee80211_regdomain.h>
176 #include <netproto/802_11/ieee80211_ratectl.h>
177 #include <netproto/802_11/ieee80211_radiotap.h>
178
179 #include "if_iwmreg.h"
180 #include "if_iwmvar.h"
181 #include "if_iwm_debug.h"
182 #include "if_iwm_util.h"
183 #include "if_iwm_binding.h"
184 #include "if_iwm_phy_db.h"
185 #include "if_iwm_mac_ctxt.h"
186 #include "if_iwm_phy_ctxt.h"
187 #include "if_iwm_time_event.h"
188 #include "if_iwm_power.h"
189 #include "if_iwm_scan.h"
190 #include "if_iwm_pcie_trans.h"
191 #include "if_iwm_led.h"
192
193 const uint8_t iwm_nvm_channels[] = {
194         /* 2.4 GHz */
195         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
196         /* 5 GHz */
197         36, 40, 44, 48, 52, 56, 60, 64,
198         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
199         149, 153, 157, 161, 165
200 };
201 #define IWM_NUM_2GHZ_CHANNELS   14
202
203 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
204     "IWM_NUM_CHANNELS is too small");
205
206 /*
207  * XXX For now, there's simply a fixed set of rate table entries
208  * that are populated.
209  */
210 const struct iwm_rate {
211         uint8_t rate;
212         uint8_t plcp;
213 } iwm_rates[] = {
214         {   2,  IWM_RATE_1M_PLCP  },
215         {   4,  IWM_RATE_2M_PLCP  },
216         {  11,  IWM_RATE_5M_PLCP  },
217         {  22,  IWM_RATE_11M_PLCP },
218         {  12,  IWM_RATE_6M_PLCP  },
219         {  18,  IWM_RATE_9M_PLCP  },
220         {  24,  IWM_RATE_12M_PLCP },
221         {  36,  IWM_RATE_18M_PLCP },
222         {  48,  IWM_RATE_24M_PLCP },
223         {  72,  IWM_RATE_36M_PLCP },
224         {  96,  IWM_RATE_48M_PLCP },
225         { 108,  IWM_RATE_54M_PLCP },
226 };
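/*
 * Note on iwm_rates[] above: 'rate' is in net80211's 500 kb/s units
 * (2 = 1 Mb/s CCK, ..., 108 = 54 Mb/s OFDM, as the IWM_RATE_*M_PLCP
 * names suggest) and 'plcp' is the PLCP rate code used when talking to
 * the firmware.  The CCK entries come first, followed by the OFDM
 * entries, which is what the IWM_RIDX_* macros below rely on.
 */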
227 #define IWM_RIDX_CCK    0
228 #define IWM_RIDX_OFDM   4
229 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
230 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
231 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
232
233 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
234 static int      iwm_firmware_store_section(struct iwm_softc *,
235                                            enum iwm_ucode_type,
236                                            const uint8_t *, size_t);
237 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
238 static void     iwm_fw_info_free(struct iwm_fw_info *);
239 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
240 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
241 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
242                                      bus_size_t, bus_size_t);
243 static void     iwm_dma_contig_free(struct iwm_dma_info *);
244 static int      iwm_alloc_fwmem(struct iwm_softc *);
245 static void     iwm_free_fwmem(struct iwm_softc *);
246 static int      iwm_alloc_sched(struct iwm_softc *);
247 static void     iwm_free_sched(struct iwm_softc *);
248 static int      iwm_alloc_kw(struct iwm_softc *);
249 static void     iwm_free_kw(struct iwm_softc *);
250 static int      iwm_alloc_ict(struct iwm_softc *);
251 static void     iwm_free_ict(struct iwm_softc *);
252 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
256                                   int);
257 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void     iwm_enable_interrupts(struct iwm_softc *);
260 static void     iwm_restore_interrupts(struct iwm_softc *);
261 static void     iwm_disable_interrupts(struct iwm_softc *);
262 static void     iwm_ict_reset(struct iwm_softc *);
263 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
264 static void     iwm_stop_device(struct iwm_softc *);
265 static void     iwm_mvm_nic_config(struct iwm_softc *);
266 static int      iwm_nic_rx_init(struct iwm_softc *);
267 static int      iwm_nic_tx_init(struct iwm_softc *);
268 static int      iwm_nic_init(struct iwm_softc *);
269 static void     iwm_enable_txq(struct iwm_softc *, int, int);
270 static int      iwm_post_alive(struct iwm_softc *);
271 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274                                      uint16_t *);
275 static uint32_t iwm_eeprom_channel_flags(uint16_t);
276 static void     iwm_add_channel_band(struct iwm_softc *,
277                     struct ieee80211_channel[], int, int *, int, int,
278                     const uint8_t[]);
279 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
280                     struct ieee80211_channel[]);
281 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282                                    const uint16_t *, const uint16_t *, uint8_t,
283                                    uint8_t);
284 struct iwm_nvm_section;
285 static int      iwm_parse_nvm_sections(struct iwm_softc *,
286                                        struct iwm_nvm_section *);
287 static int      iwm_nvm_init(struct iwm_softc *);
288 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
289                                         const uint8_t *, uint32_t);
290 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
291 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
292 static int      iwm_fw_alive(struct iwm_softc *, uint32_t);
293 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
294 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
295 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
296                                               enum iwm_ucode_type);
297 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
298 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
299 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
300 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
301                                             struct iwm_rx_phy_info *);
302 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
303                                       struct iwm_rx_packet *,
304                                       struct iwm_rx_data *);
305 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
306 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
307                                    struct iwm_rx_data *);
308 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
309                                          struct iwm_rx_packet *,
310                                          struct iwm_node *);
311 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
312                                   struct iwm_rx_data *);
313 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 #if 0
315 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
316                                  uint16_t);
317 #endif
318 static const struct iwm_rate *
319         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
320                         struct ieee80211_frame *, struct iwm_tx_cmd *);
321 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
322                        struct ieee80211_node *, int);
323 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
324                              const struct ieee80211_bpf_params *);
325 static void     iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
326                                              struct iwm_mvm_add_sta_cmd_v5 *);
327 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
328                                                 struct iwm_mvm_add_sta_cmd_v6 *,
329                                                 int *);
330 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331                                        int);
332 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
333 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
334 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
335                                            struct iwm_int_sta *,
336                                            const uint8_t *, uint16_t, uint16_t);
337 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
338 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
339 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
340 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
341 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
342 static struct ieee80211_node *
343                 iwm_node_alloc(struct ieee80211vap *,
344                                const uint8_t[IEEE80211_ADDR_LEN]);
345 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
346 static int      iwm_media_change(struct ifnet *);
347 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
348 static void     iwm_endscan_cb(void *, int);
349 static int      iwm_init_hw(struct iwm_softc *);
350 static void     iwm_init(struct iwm_softc *);
351 static void     iwm_start(struct iwm_softc *);
352 static void     iwm_stop(struct iwm_softc *);
353 static void     iwm_watchdog(void *);
354 static void     iwm_parent(struct ieee80211com *);
355 #ifdef IWM_DEBUG
356 static const char *
357                 iwm_desc_lookup(uint32_t);
358 static void     iwm_nic_error(struct iwm_softc *);
359 #endif
360 static void     iwm_notif_intr(struct iwm_softc *);
361 static void     iwm_intr(void *);
362 static int      iwm_attach(device_t);
363 static void     iwm_preinit(void *);
364 static int      iwm_detach_local(struct iwm_softc *sc, int);
365 static void     iwm_init_task(void *);
366 static void     iwm_radiotap_attach(struct iwm_softc *);
367 static struct ieee80211vap *
368                 iwm_vap_create(struct ieee80211com *,
369                                const char [IFNAMSIZ], int,
370                                enum ieee80211_opmode, int,
371                                const uint8_t [IEEE80211_ADDR_LEN],
372                                const uint8_t [IEEE80211_ADDR_LEN]);
373 static void     iwm_vap_delete(struct ieee80211vap *);
374 static void     iwm_scan_start(struct ieee80211com *);
375 static void     iwm_scan_end(struct ieee80211com *);
376 static void     iwm_update_mcast(struct ieee80211com *);
377 static void     iwm_set_channel(struct ieee80211com *);
378 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
379 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
380 static int      iwm_detach(device_t);
381
382 #if defined(__DragonFly__)
383 static int      iwm_msi_enable = 1;
384
385 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
386
387 /*
388  * This is a hack due to the wlan_serializer deadlocking sleepers.
389  */
390 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);
391
392 int
393 iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
394 {
395         int error;
396
397         if (wlan_is_serialized()) {
398                 wlan_serialize_exit();
399                 kprintf("%s: have to release serializer for sleeping\n",
400                     __func__);
401                 error = lksleep(chan, lk, flags, wmesg, to);
402                 lockmgr(lk, LK_RELEASE);
403                 wlan_serialize_enter();
404                 lockmgr(lk, LK_EXCLUSIVE);
405         } else {
406                 error = lksleep(chan, lk, flags, wmesg, to);
407         }
408         return error;
409 }
410
411 #endif
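/*
 * Usage sketch (mirrors iwm_read_firmware() below): where the FreeBSD
 * code does
 *      msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
 * the DragonFly build calls
 *      iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
 * so the wlan serializer can be dropped for the duration of the sleep.
 */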
412
413 /*
414  * Firmware parser.
415  */
416
417 static int
418 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
419 {
420         const struct iwm_fw_cscheme_list *l = (const void *)data;
421
422         if (dlen < sizeof(*l) ||
423             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
424                 return EINVAL;
425
426         /* we don't actually store anything for now, always use s/w crypto */
427
428         return 0;
429 }
430
431 static int
432 iwm_firmware_store_section(struct iwm_softc *sc,
433     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
434 {
435         struct iwm_fw_sects *fws;
436         struct iwm_fw_onesect *fwone;
437
438         if (type >= IWM_UCODE_TYPE_MAX)
439                 return EINVAL;
440         if (dlen < sizeof(uint32_t))
441                 return EINVAL;
442
443         fws = &sc->sc_fw.fw_sects[type];
444         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
445                 return EINVAL;
446
447         fwone = &fws->fw_sect[fws->fw_count];
448
449         /* first 32bit are device load offset */
450         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
451
452         /* rest is data */
453         fwone->fws_data = data + sizeof(uint32_t);
454         fwone->fws_len = dlen - sizeof(uint32_t);
455
456         fws->fw_count++;
457         fws->fw_totlen += fwone->fws_len;
458
459         return 0;
460 }
461
462 struct iwm_tlv_calib_data {
463         uint32_t ucode_type;
464         struct iwm_tlv_calib_ctrl calib;
465 } __packed;
466
467 static int
468 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
469 {
470         const struct iwm_tlv_calib_data *def_calib = data;
471         uint32_t ucode_type = le32toh(def_calib->ucode_type);
472
473         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474                 device_printf(sc->sc_dev,
475                     "Wrong ucode_type %u for default "
476                     "calibration.\n", ucode_type);
477                 return EINVAL;
478         }
479
480         sc->sc_default_calib[ucode_type].flow_trigger =
481             def_calib->calib.flow_trigger;
482         sc->sc_default_calib[ucode_type].event_trigger =
483             def_calib->calib.event_trigger;
484
485         return 0;
486 }
487
488 static void
489 iwm_fw_info_free(struct iwm_fw_info *fw)
490 {
491         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
492         fw->fw_fp = NULL;
493         /* don't touch fw->fw_status */
494         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
495 }
496
497 static int
498 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
499 {
500         struct iwm_fw_info *fw = &sc->sc_fw;
501         const struct iwm_tlv_ucode_header *uhdr;
502         struct iwm_ucode_tlv tlv;
503         enum iwm_ucode_tlv_type tlv_type;
504         const struct firmware *fwp;
505         const uint8_t *data;
506         int error = 0;
507         size_t len;
508
509         if (fw->fw_status == IWM_FW_STATUS_DONE &&
510             ucode_type != IWM_UCODE_TYPE_INIT)
511                 return 0;
512
513         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
514 #if defined(__DragonFly__)
515                 iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
516 #else
517                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
518 #endif
519         }
520         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
521
522         if (fw->fw_fp != NULL)
523                 iwm_fw_info_free(fw);
524
525         /*
526          * Load firmware into driver memory.
527          * fw_fp will be set.
528          */
529         IWM_UNLOCK(sc);
530         fwp = firmware_get(sc->sc_fwname);
531         IWM_LOCK(sc);
532         if (fwp == NULL) {
                error = ENOENT;         /* firmware_get() does not return an errno */
533                 device_printf(sc->sc_dev,
534                     "could not read firmware %s (error %d)\n",
535                     sc->sc_fwname, error);
536                 goto out;
537         }
538         fw->fw_fp = fwp;
539
540         /*
541          * Parse firmware contents
542          */
543
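        /*
         * Sketch of the TLV image layout, as implied by the code below
         * (see struct iwm_tlv_ucode_header for the authoritative
         * definition): the header begins with a 32-bit zero word and
         * carries the IWM_TLV_UCODE_MAGIC value and the ucode version;
         * uhdr->data is then a sequence of records, each a struct
         * iwm_ucode_tlv { type, length } followed by 'length' payload
         * bytes, with every record padded to a 4-byte boundary (hence
         * the roundup(tlv_len, 4) at the bottom of the loop).
         */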
544         uhdr = (const void *)fw->fw_fp->data;
545         if (*(const uint32_t *)fw->fw_fp->data != 0
546             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
547                 device_printf(sc->sc_dev, "invalid firmware %s\n",
548                     sc->sc_fwname);
549                 error = EINVAL;
550                 goto out;
551         }
552
553         sc->sc_fwver = le32toh(uhdr->ver);
554         data = uhdr->data;
555         len = fw->fw_fp->datasize - sizeof(*uhdr);
556
557         while (len >= sizeof(tlv)) {
558                 size_t tlv_len;
559                 const void *tlv_data;
560
561                 memcpy(&tlv, data, sizeof(tlv));
562                 tlv_len = le32toh(tlv.length);
563                 tlv_type = le32toh(tlv.type);
564
565                 len -= sizeof(tlv);
566                 data += sizeof(tlv);
567                 tlv_data = data;
568
569                 if (len < tlv_len) {
570                         device_printf(sc->sc_dev,
571                             "firmware too short: %zu bytes\n",
572                             len);
573                         error = EINVAL;
574                         goto parse_out;
575                 }
576
577                 switch ((int)tlv_type) {
578                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
579                         if (tlv_len < sizeof(uint32_t)) {
580                                 device_printf(sc->sc_dev,
581                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
582                                     __func__,
583                                     (int) tlv_len);
584                                 error = EINVAL;
585                                 goto parse_out;
586                         }
587                         sc->sc_capa_max_probe_len
588                             = le32toh(*(const uint32_t *)tlv_data);
589                         /* limit it to something sensible */
590                         if (sc->sc_capa_max_probe_len > (1<<16)) {
591                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
592                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
593                                     "ridiculous\n", __func__);
594                                 error = EINVAL;
595                                 goto parse_out;
596                         }
597                         break;
598                 case IWM_UCODE_TLV_PAN:
599                         if (tlv_len) {
600                                 device_printf(sc->sc_dev,
601                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
602                                     __func__,
603                                     (int) tlv_len);
604                                 error = EINVAL;
605                                 goto parse_out;
606                         }
607                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
608                         break;
609                 case IWM_UCODE_TLV_FLAGS:
610                         if (tlv_len < sizeof(uint32_t)) {
611                                 device_printf(sc->sc_dev,
612                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
613                                     __func__,
614                                     (int) tlv_len);
615                                 error = EINVAL;
616                                 goto parse_out;
617                         }
618                         /*
619                          * Apparently there can be many flags, but Linux driver
620                          * parses only the first one, and so do we.
621                          *
622                          * XXX: why does this override IWM_UCODE_TLV_PAN?
623                          * Intentional or a bug?  Observations from
624                          * current firmware file:
625                          *  1) TLV_PAN is parsed first
626                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
627                          * ==> this resets TLV_PAN to itself... hnnnk
628                          */
629                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
630                         break;
631                 case IWM_UCODE_TLV_CSCHEME:
632                         if ((error = iwm_store_cscheme(sc,
633                             tlv_data, tlv_len)) != 0) {
634                                 device_printf(sc->sc_dev,
635                                     "%s: iwm_store_cscheme(): returned %d\n",
636                                     __func__,
637                                     error);
638                                 goto parse_out;
639                         }
640                         break;
641                 case IWM_UCODE_TLV_NUM_OF_CPU:
642                         if (tlv_len != sizeof(uint32_t)) {
643                                 device_printf(sc->sc_dev,
644                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
645                                     __func__,
646                                     (int) tlv_len);
647                                 error = EINVAL;
648                                 goto parse_out;
649                         }
650                         if (le32toh(*(const uint32_t*)tlv_data) != 1) {
651                                 device_printf(sc->sc_dev,
652                                     "%s: driver supports "
653                                     "only TLV_NUM_OF_CPU == 1",
654                                     __func__);
655                                 error = EINVAL;
656                                 goto parse_out;
657                         }
658                         break;
659                 case IWM_UCODE_TLV_SEC_RT:
660                         if ((error = iwm_firmware_store_section(sc,
661                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
662                                 device_printf(sc->sc_dev,
663                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
664                                     __func__,
665                                     error);
666                                 goto parse_out;
667                         }
668                         break;
669                 case IWM_UCODE_TLV_SEC_INIT:
670                         if ((error = iwm_firmware_store_section(sc,
671                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
674                                     __func__,
675                                     error);
676                                 goto parse_out;
677                         }
678                         break;
679                 case IWM_UCODE_TLV_SEC_WOWLAN:
680                         if ((error = iwm_firmware_store_section(sc,
681                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
684                                     __func__,
685                                     error);
686                                 goto parse_out;
687                         }
688                         break;
689                 case IWM_UCODE_TLV_DEF_CALIB:
690                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
691                                 device_printf(sc->sc_dev,
692                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
693                                     __func__,
694                                     (int) tlv_len,
695                                     (int) sizeof(struct iwm_tlv_calib_data));
696                                 error = EINVAL;
697                                 goto parse_out;
698                         }
699                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
700                                 device_printf(sc->sc_dev,
701                                     "%s: iwm_set_default_calib() failed: %d\n",
702                                     __func__,
703                                     error);
704                                 goto parse_out;
705                         }
706                         break;
707                 case IWM_UCODE_TLV_PHY_SKU:
708                         if (tlv_len != sizeof(uint32_t)) {
709                                 error = EINVAL;
710                                 device_printf(sc->sc_dev,
711                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
712                                     __func__,
713                                     (int) tlv_len);
714                                 goto parse_out;
715                         }
716                         sc->sc_fw_phy_config =
717                             le32toh(*(const uint32_t *)tlv_data);
718                         break;
719
720                 case IWM_UCODE_TLV_API_CHANGES_SET:
721                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
722                         /* ignore, not used by current driver */
723                         break;
724
725                 default:
726                         device_printf(sc->sc_dev,
727                             "%s: unknown firmware section %d, abort\n",
728                             __func__, tlv_type);
729                         error = EINVAL;
730                         goto parse_out;
731                 }
732
733                 len -= roundup(tlv_len, 4);
734                 data += roundup(tlv_len, 4);
735         }
736
737         KASSERT(error == 0, ("unhandled error"));
738
739  parse_out:
740         if (error) {
741                 device_printf(sc->sc_dev, "firmware parse error %d, "
742                     "section type %d\n", error, tlv_type);
743         }
744
745         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
746                 device_printf(sc->sc_dev,
747                     "device uses unsupported power ops\n");
748                 error = ENOTSUP;
749         }
750
751  out:
752         if (error) {
753                 fw->fw_status = IWM_FW_STATUS_NONE;
754                 if (fw->fw_fp != NULL)
755                         iwm_fw_info_free(fw);
756         } else
757                 fw->fw_status = IWM_FW_STATUS_DONE;
758         wakeup(&sc->sc_fw);
759
760         return error;
761 }
762
763 /*
764  * DMA resource routines
765  */
766
767 static void
768 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
769 {
770         if (error != 0)
771                 return;
772         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
773         *(bus_addr_t *)arg = segs[0].ds_addr;
774 }
775
776 static int
777 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
778     bus_size_t size, bus_size_t alignment)
779 {
780         int error;
781
782         dma->tag = NULL;
783         dma->size = size;
784
785 #if defined(__DragonFly__)
786         error = bus_dma_tag_create(tag, alignment,
787                                    0,
788                                    BUS_SPACE_MAXADDR_32BIT,
789                                    BUS_SPACE_MAXADDR,
790                                    NULL, NULL,
791                                    size, 1, size,
792                                    BUS_DMA_NOWAIT, &dma->tag);
793 #else
794         error = bus_dma_tag_create(tag, alignment,
795             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
796             1, size, 0, NULL, NULL, &dma->tag);
797 #endif
798         if (error != 0)
799                 goto fail;
800
801         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
802             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
803         if (error != 0)
804                 goto fail;
805
806         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
807             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
808         if (error != 0)
809                 goto fail;
810
811         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
812
813         return 0;
814
815 fail:
816         iwm_dma_contig_free(dma);
817
818         return error;
819 }
820
821 static void
822 iwm_dma_contig_free(struct iwm_dma_info *dma)
823 {
824         if (dma->map != NULL) {
825                 if (dma->vaddr != NULL) {
826                         bus_dmamap_sync(dma->tag, dma->map,
827                             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
828                         bus_dmamap_unload(dma->tag, dma->map);
829                         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
830                         dma->vaddr = NULL;
831                 }
832                 bus_dmamap_destroy(dma->tag, dma->map);
833                 dma->map = NULL;
834         }
835         if (dma->tag != NULL) {
836                 bus_dma_tag_destroy(dma->tag);
837                 dma->tag = NULL;
838         }
839
840 }
841
842 /* fwmem is used to load firmware onto the card */
843 static int
844 iwm_alloc_fwmem(struct iwm_softc *sc)
845 {
846         /* Must be aligned on a 16-byte boundary. */
847         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
848             sc->sc_fwdmasegsz, 16);
849 }
850
851 static void
852 iwm_free_fwmem(struct iwm_softc *sc)
853 {
854         iwm_dma_contig_free(&sc->fw_dma);
855 }
856
857 /* tx scheduler rings.  not used? */
858 static int
859 iwm_alloc_sched(struct iwm_softc *sc)
860 {
861         int rv;
862
863         /* TX scheduler rings must be aligned on a 1KB boundary. */
864         rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
865             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
866         return rv;
867 }
868
869 static void
870 iwm_free_sched(struct iwm_softc *sc)
871 {
872         iwm_dma_contig_free(&sc->sched_dma);
873 }
874
875 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
876 static int
877 iwm_alloc_kw(struct iwm_softc *sc)
878 {
879         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
880 }
881
882 static void
883 iwm_free_kw(struct iwm_softc *sc)
884 {
885         iwm_dma_contig_free(&sc->kw_dma);
886 }
887
888 /* interrupt cause table */
889 static int
890 iwm_alloc_ict(struct iwm_softc *sc)
891 {
892         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
893             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
894 }
895
896 static void
897 iwm_free_ict(struct iwm_softc *sc)
898 {
899         iwm_dma_contig_free(&sc->ict_dma);
900 }
901
902 static int
903 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
904 {
905         bus_size_t size;
906         int i, error;
907
908         ring->cur = 0;
909
910         /* Allocate RX descriptors (256-byte aligned). */
911         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
912         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
913         if (error != 0) {
914                 device_printf(sc->sc_dev,
915                     "could not allocate RX ring DMA memory\n");
916                 goto fail;
917         }
918         ring->desc = ring->desc_dma.vaddr;
919
920         /* Allocate RX status area (16-byte aligned). */
921         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
922             sizeof(*ring->stat), 16);
923         if (error != 0) {
924                 device_printf(sc->sc_dev,
925                     "could not allocate RX status DMA memory\n");
926                 goto fail;
927         }
928         ring->stat = ring->stat_dma.vaddr;
929
930         /* Create RX buffer DMA tag. */
931 #if defined(__DragonFly__)
932         error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
933                                    0,
934                                    BUS_SPACE_MAXADDR_32BIT,
935                                    BUS_SPACE_MAXADDR,
936                                    NULL, NULL,
937                                    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
938                                    BUS_DMA_NOWAIT, &ring->data_dmat);
939 #else
940         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
941             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
942             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
943 #endif
944         if (error != 0) {
945                 device_printf(sc->sc_dev,
946                     "%s: could not create RX buf DMA tag, error %d\n",
947                     __func__, error);
948                 goto fail;
949         }
950
951         /*
952          * Allocate and map RX buffers.
953          */
954         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
955                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
956                         goto fail;
957                 }
958         }
959         return 0;
960
961 fail:   iwm_free_rx_ring(sc, ring);
962         return error;
963 }
964
965 static void
966 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
967 {
968         /* XXX conditional nic locks are stupid */
969         /* XXX print out if we can't lock the NIC? */
970         if (iwm_nic_lock(sc)) {
971                 /* XXX handle if RX stop doesn't finish? */
972                 (void) iwm_pcie_rx_stop(sc);
973                 iwm_nic_unlock(sc);
974         }
975         /* Reset the ring state */
976         ring->cur = 0;
977
978         /*
979          * The hw rx ring index in shared memory must also be cleared,
980          * otherwise the discrepancy can cause reprocessing chaos.
981          */
982         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
983 }
984
985 static void
986 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
987 {
988         int i;
989
990         iwm_dma_contig_free(&ring->desc_dma);
991         iwm_dma_contig_free(&ring->stat_dma);
992
993         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
994                 struct iwm_rx_data *data = &ring->data[i];
995
996                 if (data->m != NULL) {
997                         bus_dmamap_sync(ring->data_dmat, data->map,
998                             BUS_DMASYNC_POSTREAD);
999                         bus_dmamap_unload(ring->data_dmat, data->map);
1000                         m_freem(data->m);
1001                         data->m = NULL;
1002                 }
1003                 if (data->map != NULL) {
1004                         bus_dmamap_destroy(ring->data_dmat, data->map);
1005                         data->map = NULL;
1006                 }
1007         }
1008         if (ring->data_dmat != NULL) {
1009                 bus_dma_tag_destroy(ring->data_dmat);
1010                 ring->data_dmat = NULL;
1011         }
1012 }
1013
1014 static int
1015 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1016 {
1017         bus_addr_t paddr;
1018         bus_size_t size;
1019         int i, error;
1020
1021         ring->qid = qid;
1022         ring->queued = 0;
1023         ring->cur = 0;
1024
1025         /* Allocate TX descriptors (256-byte aligned). */
1026         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1027         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1028         if (error != 0) {
1029                 device_printf(sc->sc_dev,
1030                     "could not allocate TX ring DMA memory\n");
1031                 goto fail;
1032         }
1033         ring->desc = ring->desc_dma.vaddr;
1034
1035         /*
1036          * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1037          * to allocate commands space for other rings.
1038          */
1039         if (qid > IWM_MVM_CMD_QUEUE)
1040                 return 0;
1041
1042         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1043         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1044         if (error != 0) {
1045                 device_printf(sc->sc_dev,
1046                     "could not allocate TX cmd DMA memory\n");
1047                 goto fail;
1048         }
1049         ring->cmd = ring->cmd_dma.vaddr;
1050
1051 #if defined(__DragonFly__)
1052         error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1053                                    0,
1054                                    BUS_SPACE_MAXADDR_32BIT,
1055                                    BUS_SPACE_MAXADDR,
1056                                    NULL, NULL,
1057                                    MCLBYTES, IWM_MAX_SCATTER - 2, MCLBYTES,
1058                                    BUS_DMA_NOWAIT, &ring->data_dmat);
1059 #else
1060         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1061             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1062             IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
1063 #endif
1064         if (error != 0) {
1065                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1066                 goto fail;
1067         }
1068
1069         paddr = ring->cmd_dma.paddr;
1070         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1071                 struct iwm_tx_data *data = &ring->data[i];
1072
1073                 data->cmd_paddr = paddr;
1074                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1075                     + offsetof(struct iwm_tx_cmd, scratch);
1076                 paddr += sizeof(struct iwm_device_cmd);
1077
1078                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1079                 if (error != 0) {
1080                         device_printf(sc->sc_dev,
1081                             "could not create TX buf DMA map\n");
1082                         goto fail;
1083                 }
1084         }
1085         KASSERT(paddr == ring->cmd_dma.paddr + size,
1086             ("invalid physical address"));
1087         return 0;
1088
1089 fail:   iwm_free_tx_ring(sc, ring);
1090         return error;
1091 }
1092
1093 static void
1094 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1095 {
1096         int i;
1097
1098         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1099                 struct iwm_tx_data *data = &ring->data[i];
1100
1101                 if (data->m != NULL) {
1102                         bus_dmamap_sync(ring->data_dmat, data->map,
1103                             BUS_DMASYNC_POSTWRITE);
1104                         bus_dmamap_unload(ring->data_dmat, data->map);
1105                         m_freem(data->m);
1106                         data->m = NULL;
1107                 }
1108         }
1109         /* Clear TX descriptors. */
1110         memset(ring->desc, 0, ring->desc_dma.size);
1111         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1112             BUS_DMASYNC_PREWRITE);
1113         sc->qfullmsk &= ~(1 << ring->qid);
1114         ring->queued = 0;
1115         ring->cur = 0;
1116 }
1117
1118 static void
1119 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1120 {
1121         int i;
1122
1123         iwm_dma_contig_free(&ring->desc_dma);
1124         iwm_dma_contig_free(&ring->cmd_dma);
1125
1126         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1127                 struct iwm_tx_data *data = &ring->data[i];
1128
1129                 if (data->m != NULL) {
1130                         bus_dmamap_sync(ring->data_dmat, data->map,
1131                             BUS_DMASYNC_POSTWRITE);
1132                         bus_dmamap_unload(ring->data_dmat, data->map);
1133                         m_freem(data->m);
1134                         data->m = NULL;
1135                 }
1136                 if (data->map != NULL) {
1137                         bus_dmamap_destroy(ring->data_dmat, data->map);
1138                         data->map = NULL;
1139                 }
1140         }
1141         if (ring->data_dmat != NULL) {
1142                 bus_dma_tag_destroy(ring->data_dmat);
1143                 ring->data_dmat = NULL;
1144         }
1145 }
1146
1147 /*
1148  * High-level hardware frobbing routines
1149  */
1150
1151 static void
1152 iwm_enable_interrupts(struct iwm_softc *sc)
1153 {
1154         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1155         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1156 }
1157
1158 static void
1159 iwm_restore_interrupts(struct iwm_softc *sc)
1160 {
1161         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1162 }
1163
1164 static void
1165 iwm_disable_interrupts(struct iwm_softc *sc)
1166 {
1167         /* disable interrupts */
1168         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1169
1170         /* acknowledge all interrupts */
1171         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1172         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1173 }
1174
1175 static void
1176 iwm_ict_reset(struct iwm_softc *sc)
1177 {
1178         iwm_disable_interrupts(sc);
1179
1180         /* Reset ICT table. */
1181         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1182         sc->ict_cur = 0;
1183
1184         /* Set physical address of ICT table (4KB aligned). */
1185         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1186             IWM_CSR_DRAM_INT_TBL_ENABLE
1187             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1188             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1189
1190         /* Switch to ICT interrupt mode in driver. */
1191         sc->sc_flags |= IWM_FLAG_USE_ICT;
1192
1193         /* Re-enable interrupts. */
1194         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1195         iwm_enable_interrupts(sc);
1196 }
1197
1198 /*
1199  * Since this .. hard-resets things, it's time to actually
1200  * mark the first vap (if any) as having no mac context.
1201  * It's annoying, but since the driver is potentially being
1202  * stop/start'ed whilst active (thanks openbsd port!) we
1203  * have to correctly track this.
1204  */
1205 static void
1206 iwm_stop_device(struct iwm_softc *sc)
1207 {
1208         struct ieee80211com *ic = &sc->sc_ic;
1209         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1210         int chnl, ntries;
1211         int qid;
1212
1213         /* tell the device to stop sending interrupts */
1214         iwm_disable_interrupts(sc);
1215
1216         /*
1217          * FreeBSD-local: mark the first vap as not-uploaded,
1218          * so the next transition through auth/assoc
1219          * will correctly populate the MAC context.
1220          */
1221         if (vap) {
1222                 struct iwm_vap *iv = IWM_VAP(vap);
1223                 iv->is_uploaded = 0;
1224         }
1225
1226         /* device going down, Stop using ICT table */
1227         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1228
1229         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1230
1231         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1232
1233         /* Stop all DMA channels. */
1234         if (iwm_nic_lock(sc)) {
1235                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1236                         IWM_WRITE(sc,
1237                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1238                         for (ntries = 0; ntries < 200; ntries++) {
1239                                 uint32_t r;
1240
1241                                 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1242                                 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1243                                     chnl))
1244                                         break;
1245                                 DELAY(20);
1246                         }
1247                 }
1248                 iwm_nic_unlock(sc);
1249         }
1250
1251         /* Stop RX ring. */
1252         iwm_reset_rx_ring(sc, &sc->rxq);
1253
1254         /* Reset all TX rings. */
1255         for (qid = 0; qid < nitems(sc->txq); qid++)
1256                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1257
1258         /*
1259          * Power-down device's busmaster DMA clocks
1260          */
1261         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1262         DELAY(5);
1263
1264         /* Make sure (redundant) we've released our request to stay awake */
1265         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1266             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1267
1268         /* Stop the device, and put it in low power state */
1269         iwm_apm_stop(sc);
1270
1271         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1272          * Clean again the interrupt here
1273          */
1274         iwm_disable_interrupts(sc);
1275         /* stop and reset the on-board processor */
1276         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
1277
1278         /*
1279          * Even if we stop the HW, we still want the RF kill
1280          * interrupt
1281          */
1282         iwm_enable_rfkill_int(sc);
1283         iwm_check_rfkill(sc);
1284 }
1285
1286 static void
1287 iwm_mvm_nic_config(struct iwm_softc *sc)
1288 {
1289         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1290         uint32_t reg_val = 0;
1291
1292         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1293             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1294         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1295             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1296         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1297             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1298
1299         /* SKU control */
1300         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1301             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1302         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1303             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1304
1305         /* radio configuration */
1306         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1307         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1308         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1309
1310         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1311
1312         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1313             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1314             radio_cfg_step, radio_cfg_dash);
1315
1316         /*
1317          * W/A : NIC is stuck in a reset state after Early PCIe power off
1318          * (PCIe power is lost before PERST# is asserted), causing ME FW
1319          * to lose ownership and not being able to obtain it back.
1320          */
1321         iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1322             IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1323             ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1324 }
1325
1326 static int
1327 iwm_nic_rx_init(struct iwm_softc *sc)
1328 {
1329         if (!iwm_nic_lock(sc))
1330                 return EBUSY;
1331
1332         /*
1333          * Initialize RX ring.  This is from the iwn driver.
1334          */
1335         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1336
1337         /* stop DMA */
1338         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1339         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1340         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1341         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1342         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1343
1344         /* Set physical address of RX ring (256-byte aligned). */
1345         IWM_WRITE(sc,
1346             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1347
1348         /* Set physical address of RX status (16-byte aligned). */
1349         IWM_WRITE(sc,
1350             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1351
1352 #if defined(__DragonFly__)
1353         /* Force serialization (probably not needed but don't trust the HW) */
1354         IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1355 #endif
1356
1357         /* Enable RX. */
1358         /*
1359          * Note: Linux driver also sets this:
1360          *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1361          *
1362          * It causes weird behavior.  YMMV.
1363          */
1364         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1365             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1366             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1367             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1368             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1369             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
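
	/*
	 * Reference sketch only (not enabled): the iwlwifi-style setting the
	 * note above refers to would OR the RB timeout into the same write,
	 * roughly as follows.
	 */
#if 0
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
#endif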
1370
1371         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1372
1373         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1374         if (sc->host_interrupt_operation_mode)
1375                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1376
1377         /*
1378          * Thus sayeth el jefe (iwlwifi) via a comment:
1379          *
1380          * This value should initially be 0 (before preparing any
1381          * RBs), should be 8 after preparing the first 8 RBs (for example)
1382          */
1383         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1384
1385         iwm_nic_unlock(sc);
1386
1387         return 0;
1388 }
1389
1390 static int
1391 iwm_nic_tx_init(struct iwm_softc *sc)
1392 {
1393         int qid;
1394
1395         if (!iwm_nic_lock(sc))
1396                 return EBUSY;
1397
1398         /* Deactivate TX scheduler. */
1399         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1400
1401         /* Set physical address of "keep warm" page (16-byte aligned). */
1402         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1403
1404         /* Initialize TX rings. */
1405         for (qid = 0; qid < nitems(sc->txq); qid++) {
1406                 struct iwm_tx_ring *txq = &sc->txq[qid];
1407
1408                 /* Set physical address of TX ring (256-byte aligned). */
1409                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1410                     txq->desc_dma.paddr >> 8);
1411                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1412                     "%s: loading ring %d descriptors (%p) at %lx\n",
1413                     __func__,
1414                     qid, txq->desc,
1415                     (unsigned long) (txq->desc_dma.paddr >> 8));
1416         }
1417         iwm_nic_unlock(sc);
1418
1419         return 0;
1420 }
1421
1422 static int
1423 iwm_nic_init(struct iwm_softc *sc)
1424 {
1425         int error;
1426
1427         iwm_apm_init(sc);
1428         iwm_set_pwr(sc);
1429
1430         iwm_mvm_nic_config(sc);
1431
1432         if ((error = iwm_nic_rx_init(sc)) != 0)
1433                 return error;
1434
1435         /*
1436          * Ditto for TX, from iwn
1437          */
1438         if ((error = iwm_nic_tx_init(sc)) != 0)
1439                 return error;
1440
1441         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1442             "%s: shadow registers enabled\n", __func__);
1443         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1444
1445         return 0;
1446 }
1447
1448 enum iwm_mvm_tx_fifo {
1449         IWM_MVM_TX_FIFO_BK = 0,
1450         IWM_MVM_TX_FIFO_BE,
1451         IWM_MVM_TX_FIFO_VI,
1452         IWM_MVM_TX_FIFO_VO,
1453         IWM_MVM_TX_FIFO_MCAST = 5,
1454 };
1455
1456 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1457         IWM_MVM_TX_FIFO_VO,
1458         IWM_MVM_TX_FIFO_VI,
1459         IWM_MVM_TX_FIFO_BE,
1460         IWM_MVM_TX_FIFO_BK,
1461 };
1462
1463 static void
1464 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
1465 {
1466         if (!iwm_nic_lock(sc)) {
1467                 device_printf(sc->sc_dev,
1468                     "%s: cannot enable txq %d\n",
1469                     __func__,
1470                     qid);
1471                 return; /* XXX return EBUSY */
1472         }
1473
1474         /* deactivate before configuration */
1475         iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1476             (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1477             | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1478
1479         if (qid != IWM_MVM_CMD_QUEUE) {
1480                 iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
1481         }
1482
1483         iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1484
1485         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1486         iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1487
1488         iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1489         /* Set scheduler window size and frame limit. */
1490         iwm_write_mem32(sc,
1491             sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1492             sizeof(uint32_t),
1493             ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1494             IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1495             ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1496             IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1497
1498         iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1499             (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1500             (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1501             (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1502             IWM_SCD_QUEUE_STTS_REG_MSK);
1503
1504         iwm_nic_unlock(sc);
1505
1506         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1507             "%s: enabled txq %d FIFO %d\n",
1508             __func__, qid, fifo);
1509 }
1510
1511 static int
1512 iwm_post_alive(struct iwm_softc *sc)
1513 {
1514         int nwords;
1515         int error, chnl;
1516
1517         if (!iwm_nic_lock(sc))
1518                 return EBUSY;
1519
1520         if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
1521                 device_printf(sc->sc_dev,
1522                     "%s: sched addr mismatch\n",
1523                     __func__);
1524                 error = EINVAL;
1525                 goto out;
1526         }
1527
1528         iwm_ict_reset(sc);
1529
1530         /* Clear TX scheduler state in SRAM. */
1531         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1532             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1533             / sizeof(uint32_t);
1534         error = iwm_write_mem(sc,
1535             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1536             NULL, nwords);
1537         if (error)
1538                 goto out;
1539
1540         /* Set physical address of TX scheduler rings (1KB aligned). */
1541         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1542
1543         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1544
1545         /* enable command channel */
1546         iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
1547
1548         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1549
1550         /* Enable DMA channels. */
1551         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1552                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1553                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1554                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1555         }
1556
1557         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1558             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1559
1560         /* Enable L1-Active */
1561         iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1562             IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1563
1564  out:
1565         iwm_nic_unlock(sc);
1566         return error;
1567 }
1568
1569 /*
1570  * NVM read access and content parsing.  We do not support
1571  * external NVM or writing NVM.
1572  * iwlwifi/mvm/nvm.c
1573  */
1574
1575 /* list of NVM sections we are allowed/need to read */
1576 const int nvm_to_read[] = {
1577         IWM_NVM_SECTION_TYPE_HW,
1578         IWM_NVM_SECTION_TYPE_SW,
1579         IWM_NVM_SECTION_TYPE_CALIBRATION,
1580         IWM_NVM_SECTION_TYPE_PRODUCTION,
1581 };
1582
1583 /* Default NVM size to read */
1584 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1585 #define IWM_MAX_NVM_SECTION_SIZE 7000
1586
1587 #define IWM_NVM_WRITE_OPCODE 1
1588 #define IWM_NVM_READ_OPCODE 0
1589
1590 static int
1591 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1592         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1593 {
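	/* XXX the caller-supplied offset is discarded; chunk reads start at 0 */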
1594         offset = 0;
1595         struct iwm_nvm_access_cmd nvm_access_cmd = {
1596                 .offset = htole16(offset),
1597                 .length = htole16(length),
1598                 .type = htole16(section),
1599                 .op_code = IWM_NVM_READ_OPCODE,
1600         };
1601         struct iwm_nvm_access_resp *nvm_resp;
1602         struct iwm_rx_packet *pkt;
1603         struct iwm_host_cmd cmd = {
1604                 .id = IWM_NVM_ACCESS_CMD,
1605                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1606                     IWM_CMD_SEND_IN_RFKILL,
1607                 .data = { &nvm_access_cmd, },
1608         };
1609         int ret, bytes_read, offset_read;
1610         uint8_t *resp_data;
1611
1612         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1613
1614         ret = iwm_send_cmd(sc, &cmd);
1615         if (ret)
1616                 return ret;
1617
1618         pkt = cmd.resp_pkt;
1619         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1620                 device_printf(sc->sc_dev,
1621                     "%s: Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1622                     __func__, pkt->hdr.flags);
1623                 ret = EIO;
1624                 goto exit;
1625         }
1626
1627         /* Extract NVM response */
1628         nvm_resp = (void *)pkt->data;
1629
1630         ret = le16toh(nvm_resp->status);
1631         bytes_read = le16toh(nvm_resp->length);
1632         offset_read = le16toh(nvm_resp->offset);
1633         resp_data = nvm_resp->data;
1634         if (ret) {
1635                 device_printf(sc->sc_dev,
1636                     "%s: NVM access command failed with status %d\n",
1637                     __func__, ret);
1638                 ret = EINVAL;
1639                 goto exit;
1640         }
1641
1642         if (offset_read != offset) {
1643                 device_printf(sc->sc_dev,
1644                     "%s: NVM ACCESS response with invalid offset %d\n",
1645                     __func__, offset_read);
1646                 ret = EINVAL;
1647                 goto exit;
1648         }
1649
1650         memcpy(data + offset, resp_data, bytes_read);
1651         *len = bytes_read;
1652
1653  exit:
1654         iwm_free_resp(sc, &cmd);
1655         return ret;
1656 }
1657
1658 /*
1659  * Reads an NVM section completely.
1660  * NICs prior to the 7000 family don't have a real NVM; they just read
1661  * section 0, which is the EEPROM. Because EEPROM reads are not limited
1662  * by the uCode, we have to check manually in that case that we don't
1663  * overflow and read more than the EEPROM size.
1664  * For 7000 family NICs, we supply the maximal size we can read, and
1665  * the uCode fills the response with as much data as fits, without
1666  * overflowing, so no check is needed.
1667  */
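/*
 * Illustration (sizes hypothetical): a 4000-byte section would come back
 * as a full 2048-byte chunk followed by a 1952-byte chunk; the short
 * second read (1952 < IWM_NVM_DEFAULT_CHUNK_SIZE) ends the loop below
 * with *len == 4000.
 */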
1668 static int
1669 iwm_nvm_read_section(struct iwm_softc *sc,
1670         uint16_t section, uint8_t *data, uint16_t *len)
1671 {
1672         uint16_t length, seglen;
1673         int error;
1674
1675         /* Set nvm section read length */
1676         length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1677         *len = 0;
1678
1679         /* Read the NVM until exhausted (reading less than requested) */
1680         while (seglen == length) {
1681                 error = iwm_nvm_read_chunk(sc,
1682                     section, *len, length, data, &seglen);
1683                 if (error) {
1684                         device_printf(sc->sc_dev,
1685                             "Cannot read NVM from section "
1686                             "%d offset %d, length %d\n",
1687                             section, *len, length);
1688                         return error;
1689                 }
1690                 *len += seglen;
1691         }
1692
1693         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1694             "NVM section %d read completed\n", section);
1695         return 0;
1696 }
1697
1698 /*
1699  * BEGIN IWM_NVM_PARSE
1700  */
1701
1702 /* NVM offsets (in words) definitions */
1703 enum wkp_nvm_offsets {
1704         /* NVM HW-Section offset (in words) definitions */
1705         IWM_HW_ADDR = 0x15,
1706
1707 /* NVM SW-Section offset (in words) definitions */
1708         IWM_NVM_SW_SECTION = 0x1C0,
1709         IWM_NVM_VERSION = 0,
1710         IWM_RADIO_CFG = 1,
1711         IWM_SKU = 2,
1712         IWM_N_HW_ADDRS = 3,
1713         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1714
1715 /* NVM calibration section offset (in words) definitions */
1716         IWM_NVM_CALIB_SECTION = 0x2B8,
1717         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1718 };
1719
1720 /* SKU Capabilities (actual values from NVM definition) */
1721 enum nvm_sku_bits {
1722         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1723         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1724         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1725         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1726 };
1727
1728 /* radio config bits (actual values from NVM definition) */
1729 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1730 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1731 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1732 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1733 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1734 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
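/*
 * Worked example (value hypothetical): radio_cfg = 0x3315 decodes with the
 * masks above as DASH=1, STEP=1, TYPE=1, PNUM=0, TX_ANT=0x3, RX_ANT=0x3.
 */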
1735
1736 #define DEFAULT_MAX_TX_POWER 16
1737
1738 /**
1739  * enum iwm_nvm_channel_flags - channel flags in NVM
1740  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1741  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1742  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1743  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1744  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1745  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1746  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1747  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1748  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1749  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1750  */
1751 enum iwm_nvm_channel_flags {
1752         IWM_NVM_CHANNEL_VALID = (1 << 0),
1753         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1754         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1755         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1756         IWM_NVM_CHANNEL_DFS = (1 << 7),
1757         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1758         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1759         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1760         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1761 };
1762
1763 /*
1764  * Translate EEPROM flags to net80211.
1765  */
1766 static uint32_t
1767 iwm_eeprom_channel_flags(uint16_t ch_flags)
1768 {
1769         uint32_t nflags;
1770
1771         nflags = 0;
1772         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1773                 nflags |= IEEE80211_CHAN_PASSIVE;
1774         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1775                 nflags |= IEEE80211_CHAN_NOADHOC;
1776         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1777                 nflags |= IEEE80211_CHAN_DFS;
1778                 /* Just in case. */
1779                 nflags |= IEEE80211_CHAN_NOADHOC;
1780         }
1781
1782         return (nflags);
1783 }
1784
1785 static void
1786 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1787     int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
1788 {
1789         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1790         uint32_t nflags;
1791         uint16_t ch_flags;
1792         uint8_t ieee;
1793         int error;
1794
1795         for (; ch_idx < ch_num; ch_idx++) {
1796                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1797                 ieee = iwm_nvm_channels[ch_idx];
1798
1799                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1800                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1801                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1802                             ieee, ch_flags,
1803                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1804                             "5.2" : "2.4");
1805                         continue;
1806                 }
1807
1808                 nflags = iwm_eeprom_channel_flags(ch_flags);
1809                 error = ieee80211_add_channel(chans, maxchans, nchans,
1810                     ieee, 0, 0, nflags, bands);
1811                 if (error != 0)
1812                         break;
1813
1814                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1815                     "Ch. %d Flags %x [%sGHz] - Added\n",
1816                     ieee, ch_flags,
1817                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1818                     "5.2" : "2.4");
1819         }
1820 }
1821
1822 static void
1823 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1824     struct ieee80211_channel chans[])
1825 {
1826         struct iwm_softc *sc = ic->ic_softc;
1827         struct iwm_nvm_data *data = &sc->sc_nvm;
1828         uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
1829
1830         memset(bands, 0, sizeof(bands));
1831         /* 1-13: 11b/g channels. */
1832         setbit(bands, IEEE80211_MODE_11B);
1833         setbit(bands, IEEE80211_MODE_11G);
1834         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1835             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1836
1837         /* 14: 11b channel only. */
1838         clrbit(bands, IEEE80211_MODE_11G);
1839         iwm_add_channel_band(sc, chans, maxchans, nchans,
1840             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1841
1842         if (data->sku_cap_band_52GHz_enable) {
1843                 memset(bands, 0, sizeof(bands));
1844                 setbit(bands, IEEE80211_MODE_11A);
1845                 iwm_add_channel_band(sc, chans, maxchans, nchans,
1846                     IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
1847         }
1848 }
1849
1850 static int
1851 iwm_parse_nvm_data(struct iwm_softc *sc,
1852         const uint16_t *nvm_hw, const uint16_t *nvm_sw,
1853         const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
1854 {
1855         struct iwm_nvm_data *data = &sc->sc_nvm;
1856         uint8_t hw_addr[IEEE80211_ADDR_LEN];
1857         uint16_t radio_cfg, sku;
1858
1859         data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
1860
1861         radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
1862         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
1863         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
1864         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
1865         data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
1866         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
1867         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
1868
1869         sku = le16_to_cpup(nvm_sw + IWM_SKU);
1870         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
1871         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
1872         data->sku_cap_11n_enable = 0;
1873
1874         if (!data->valid_tx_ant || !data->valid_rx_ant) {
1875                 device_printf(sc->sc_dev,
1876                     "%s: invalid antennas (0x%x, 0x%x)\n",
1877                     __func__, data->valid_tx_ant,
1878                     data->valid_rx_ant);
1879                 return EINVAL;
1880         }
1881
1882         data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
1883
1884         data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
1885         data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
1886
1887         /* Stored as LE 16-bit words: bytes come out pair-swapped (214365) */
1888         IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
1889         data->hw_addr[0] = hw_addr[1];
1890         data->hw_addr[1] = hw_addr[0];
1891         data->hw_addr[2] = hw_addr[3];
1892         data->hw_addr[3] = hw_addr[2];
1893         data->hw_addr[4] = hw_addr[5];
1894         data->hw_addr[5] = hw_addr[4];
1895
1896         memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
1897             sizeof(data->nvm_ch_flags));
1898         data->calib_version = 255;   /* TODO:
1899                                         this value keeps some checks from
1900                                         failing; we need to check whether this
1901                                         field is still needed and, if so,
1902                                         where it lives in the NVM */
1903
1904         return 0;
1905 }
1906
1907 /*
1908  * END NVM PARSE
1909  */
1910
1911 struct iwm_nvm_section {
1912         uint16_t length;
1913         const uint8_t *data;
1914 };
1915
1916 static int
1917 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1918 {
1919         const uint16_t *hw, *sw, *calib;
1920
1921         /* Checking for required sections */
1922         if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1923             !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1924                 device_printf(sc->sc_dev,
1925                     "%s: Can't parse empty NVM sections\n",
1926                     __func__);
1927                 return ENOENT;
1928         }
1929
1930         hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1931         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1932         calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1933         return iwm_parse_nvm_data(sc, hw, sw, calib,
1934             IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1935 }
1936
1937 static int
1938 iwm_nvm_init(struct iwm_softc *sc)
1939 {
1940         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1941         int i, section, error;
1942         uint16_t len;
1943         uint8_t *nvm_buffer, *temp;
1944
1945         /* Read From FW NVM */
1946         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1947             "%s: Read NVM\n",
1948             __func__);
1949
1950         /* TODO: find correct NVM max size for a section */
1951         nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1952         if (nvm_buffer == NULL)
1953                 return (ENOMEM);
1954         for (i = 0; i < nitems(nvm_to_read); i++) {
1955                 section = nvm_to_read[i];
1956                 KASSERT(section < nitems(nvm_sections),
1957                     ("too many sections"));
1958
1959                 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1960                 if (error)
1961                         break;
1962
1963                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1964                 if (temp == NULL) {
1965                         error = ENOMEM;
1966                         break;
1967                 }
1968                 memcpy(temp, nvm_buffer, len);
1969                 nvm_sections[section].data = temp;
1970                 nvm_sections[section].length = len;
1971         }
1972         kfree(nvm_buffer, M_DEVBUF);
1973         if (error)
1974                 return error;
1975
1976         return iwm_parse_nvm_sections(sc, nvm_sections);
1977 }
1978
1979 /*
1980  * Firmware loading gunk.  This is kind of a weird hybrid between the
1981  * iwn driver and the Linux iwlwifi driver.
1982  */
1983
1984 static int
1985 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
1986         const uint8_t *section, uint32_t byte_cnt)
1987 {
1988         struct iwm_dma_info *dma = &sc->fw_dma;
1989         int error;
1990
1991         /* Copy firmware section into pre-allocated DMA-safe memory. */
1992         memcpy(dma->vaddr, section, byte_cnt);
1993         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1994
1995         if (!iwm_nic_lock(sc))
1996                 return EBUSY;
1997
1998         sc->sc_fw_chunk_done = 0;
1999
2000         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2001             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2002         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2003             dst_addr);
2004         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2005             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2006         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2007             (iwm_get_dma_hi_addr(dma->paddr)
2008               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2009         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2010             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2011             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2012             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2013         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2014             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2015             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2016             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2017
2018         iwm_nic_unlock(sc);
2019
2020         /* wait 1s for this segment to load */
2021         error = 0;
2022         while (!sc->sc_fw_chunk_done) {
2023 #if defined(__DragonFly__)
2024                 error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2025 #else
2026                 error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2027 #endif
2028                 if (error)
2029                         break;
2030         }
2031
2032         return error;
2033 }
2034
2035 static int
2036 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2037 {
2038         struct iwm_fw_sects *fws;
2039         int error, i, w;
2040         const void *data;
2041         uint32_t dlen;
2042         uint32_t offset;
2043
2044         sc->sc_uc.uc_intr = 0;
2045
2046         fws = &sc->sc_fw.fw_sects[ucode_type];
2047         for (i = 0; i < fws->fw_count; i++) {
2048                 data = fws->fw_sect[i].fws_data;
2049                 dlen = fws->fw_sect[i].fws_len;
2050                 offset = fws->fw_sect[i].fws_devoff;
2051                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2052                     "LOAD FIRMWARE type %d offset %u len %d\n",
2053                     ucode_type, offset, dlen);
2054                 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2055                 if (error) {
2056                         device_printf(sc->sc_dev,
2057                             "%s: chunk %u of %u returned error %02d\n",
2058                             __func__, i, fws->fw_count, error);
2059                         return error;
2060                 }
2061         }
2062
2063         /* wait for the firmware to load */
2064         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2065
2066         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2067 #if defined(__DragonFly__)
2068                 error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2069 #else
2070                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2071 #endif
2072         }
2073
2074         return error;
2075 }
2076
2077 static int
2078 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2079 {
2080         int error;
2081
2082         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2083
2084         if ((error = iwm_nic_init(sc)) != 0) {
2085                 device_printf(sc->sc_dev, "unable to init nic\n");
2086                 return error;
2087         }
2088
2089         /* make sure rfkill handshake bits are cleared */
2090         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2091         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2092             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2093
2094         /* clear (again), then enable host interrupts */
2095         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2096         iwm_enable_interrupts(sc);
2097
2098         /* really make sure rfkill handshake bits are cleared */
2099         /* maybe we should write a few times more?  just to make sure */
2100         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2101         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2102
2103         /* Load the given image to the HW */
2104         return iwm_load_firmware(sc, ucode_type);
2105 }
2106
2107 static int
2108 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
2109 {
2110         return iwm_post_alive(sc);
2111 }
2112
2113 static int
2114 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2115 {
2116         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2117                 .valid = htole32(valid_tx_ant),
2118         };
2119
2120         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2121             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2122 }
2123
2124 static int
2125 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2126 {
2127         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2128         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2129
2130         /* Set parameters */
2131         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2132         phy_cfg_cmd.calib_control.event_trigger =
2133             sc->sc_default_calib[ucode_type].event_trigger;
2134         phy_cfg_cmd.calib_control.flow_trigger =
2135             sc->sc_default_calib[ucode_type].flow_trigger;
2136
2137         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2138             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2139         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2140             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2141 }
2142
2143 static int
2144 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2145         enum iwm_ucode_type ucode_type)
2146 {
2147         enum iwm_ucode_type old_type = sc->sc_uc_current;
2148         int error;
2149
2150         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2151                 kprintf("iwm_read_firmware: failed %d\n",
2152                         error);
2153                 return error;
2154         }
2155
2156         sc->sc_uc_current = ucode_type;
2157         error = iwm_start_fw(sc, ucode_type);
2158         if (error) {
2159                 kprintf("iwm_start_fw: failed %d\n", error);
2160                 sc->sc_uc_current = old_type;
2161                 return error;
2162         }
2163
2164         error = iwm_fw_alive(sc, sc->sched_base);
2165         if (error) {
2166                 kprintf("iwm_fw_alive: failed %d\n", error);
2167         }
2168         return error;
2169 }
2170
2171 /*
2172  * mvm misc bits
2173  */
2174
2175 static int
2176 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2177 {
2178         int error;
2179
2180         /* do not operate with rfkill switch turned on */
2181         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2182                 device_printf(sc->sc_dev,
2183                     "radio is disabled by hardware switch\n");
2184                 return EPERM;
2185         }
2186
2187         sc->sc_init_complete = 0;
2188         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2189             IWM_UCODE_TYPE_INIT)) != 0)
2190                 return error;
2191
2192         if (justnvm) {
2193                 if ((error = iwm_nvm_init(sc)) != 0) {
2194                         device_printf(sc->sc_dev, "failed to read nvm\n");
2195                         return error;
2196                 }
2197                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2198
2199                 sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2200                     + sc->sc_capa_max_probe_len
2201                     + IWM_MAX_NUM_SCAN_CHANNELS
2202                     * sizeof(struct iwm_scan_channel);
2203                 sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
2204                     M_INTWAIT);
2205                 if (sc->sc_scan_cmd == NULL)
2206                         return (ENOMEM);
2207
2208                 return 0;
2209         }
2210
2211         /* Send TX valid antennas before triggering calibrations */
2212         if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
2213                 kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
2214                 return error;
2215         }
2216
2217         /*
2218          * Send the PHY configuration command to the init uCode to start
2219          * the 16.0 uCode init image internal calibrations.
2220          */
2221         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2222                 device_printf(sc->sc_dev,
2223                     "%s: failed to run internal calibration: %d\n",
2224                     __func__, error);
2225                 return error;
2226         }
2227
2228         /*
2229          * Nothing to do but wait for the init complete notification
2230          * from the firmware
2231          */
2232         while (!sc->sc_init_complete) {
2233 #if defined(__DragonFly__)
2234                 error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
2235                                  0, "iwminit", 2*hz);
2236 #else
2237                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2238                                  0, "iwminit", 2*hz);
2239 #endif
2240                 if (error) {
2241                         kprintf("init complete failed, error %d\n",
2242                                 error);
2243                         break;
2244                 }
2245         }
2246
2247         return error;
2248 }
2249
2250 /*
2251  * receive side
2252  */
2253
2254 /* (re)stock rx ring, called at init-time and at runtime */
2255 static int
2256 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2257 {
2258         struct iwm_rx_ring *ring = &sc->rxq;
2259         struct iwm_rx_data *data = &ring->data[idx];
2260         struct mbuf *m;
2261         int error;
2262         bus_addr_t paddr;
2263
2264         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2265         if (m == NULL)
2266                 return ENOBUFS;
2267
2268         if (data->m != NULL)
2269                 bus_dmamap_unload(ring->data_dmat, data->map);
2270
2271         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2272         error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2273         if (error != 0) {
2274                 device_printf(sc->sc_dev,
2275                     "%s: could not create RX buf DMA map, error %d\n",
2276                     __func__, error);
2277                 goto fail;
2278         }
2279         data->m = m;
2280         error = bus_dmamap_load(ring->data_dmat, data->map,
2281             mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2282             &paddr, BUS_DMA_NOWAIT);
2283         if (error != 0 && error != EFBIG) {
2284                 device_printf(sc->sc_dev,
2285                     "%s: could not map mbuf, error %d\n", __func__,
2286                     error);
2287                 goto fail;
2288         }
2289         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2290
2291         /* Update RX descriptor. */
2292         KKASSERT((paddr & 255) == 0);
2293         ring->desc[idx] = htole32(paddr >> 8);
2294         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2295             BUS_DMASYNC_PREWRITE);
2296
2297         return 0;
2298 fail:
2299         return error;
2300 }
2301
2302 #define IWM_RSSI_OFFSET 50
2303 static int
2304 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2305 {
2306         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2307         uint32_t agc_a, agc_b;
2308         uint32_t val;
2309
2310         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2311         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2312         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2313
2314         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2315         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2316         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2317
2318         /*
2319          * dBm = rssi dB - agc dB - constant.
2320          * Higher AGC (higher radio gain) means lower signal.
2321          */
2322         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2323         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2324         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2325
2326         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2327             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2328             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2329
2330         return max_rssi_dbm;
2331 }
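
/*
 * Worked example for the formula above (numbers hypothetical):
 *   rssi_a = 40, agc_a = 40  ->  40 - 50 - 40 = -50 dBm
 *   rssi_b = 35, agc_b = 45  ->  35 - 50 - 45 = -60 dBm
 * so max_rssi_dbm would be -50 dBm.
 */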
2332
2333 /*
2334  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2335  * values are reported by the fw as positive values - need to negate
2336  * to obtain their dBm.  Account for missing antennas by replacing 0
2337  * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
2338  */
2339 static int
2340 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2341 {
2342         int energy_a, energy_b, energy_c, max_energy;
2343         uint32_t val;
2344
2345         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2346         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2347             IWM_RX_INFO_ENERGY_ANT_A_POS;
2348         energy_a = energy_a ? -energy_a : -256;
2349         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2350             IWM_RX_INFO_ENERGY_ANT_B_POS;
2351         energy_b = energy_b ? -energy_b : -256;
2352         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2353             IWM_RX_INFO_ENERGY_ANT_C_POS;
2354         energy_c = energy_c ? -energy_c : -256;
2355         max_energy = MAX(energy_a, energy_b);
2356         max_energy = MAX(max_energy, energy_c);
2357
2358         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2359             "energy In A %d B %d C %d , and max %d\n",
2360             energy_a, energy_b, energy_c, max_energy);
2361
2362         return max_energy;
2363 }
2364
2365 static void
2366 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2367         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2368 {
2369         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2370
2371         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2372         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2373
2374         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2375 }
2376
2377 /*
2378  * Retrieve the average noise (in dBm) among receivers.
2379  */
2380 static int
2381 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2382 {
2383         int i, total, nbant, noise;
2384
2385         total = nbant = noise = 0;
2386         for (i = 0; i < 3; i++) {
2387                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2388                 if (noise) {
2389                         total += noise;
2390                         nbant++;
2391                 }
2392         }
2393
2394         /* There should be at least one antenna but check anyway. */
2395         return (nbant == 0) ? -127 : (total / nbant) - 107;
2396 }
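
/*
 * Example (values hypothetical): beacon-silence RSSI readings of 45, 41
 * and 0 (missing antenna) give total = 86 over nbant = 2, i.e.
 * 86 / 2 - 107 = -64 dBm.
 */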
2397
2398 /*
2399  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2400  *
2401  * Handles the actual data of the Rx packet from the fw
2402  */
2403 static void
2404 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2405         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2406 {
2407         struct ieee80211com *ic = &sc->sc_ic;
2408         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2409         struct ieee80211_frame *wh;
2410         struct ieee80211_node *ni;
2411         struct ieee80211_rx_stats rxs;
2412         struct mbuf *m;
2413         struct iwm_rx_phy_info *phy_info;
2414         struct iwm_rx_mpdu_res_start *rx_res;
2415         uint32_t len;
2416         uint32_t rx_pkt_status;
2417         int rssi;
2418
2419         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2420
2421         phy_info = &sc->sc_last_phy_info;
2422         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2423         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2424         len = le16toh(rx_res->byte_count);
2425         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2426
2427         m = data->m;
2428         m->m_data = pkt->data + sizeof(*rx_res);
2429         m->m_pkthdr.len = m->m_len = len;
2430
2431         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2432                 device_printf(sc->sc_dev,
2433                     "dsp size out of range [0,20]: %d\n",
2434                     phy_info->cfg_phy_cnt);
2435                 return;
2436         }
2437
2438         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2439             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2440                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2441                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2442                 return; /* drop */
2443         }
2444
2445         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2446                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2447         } else {
2448                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2449         }
2450         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2451         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2452
2453         /* replenish ring for the buffer we're going to feed to the sharks */
2454         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2455                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2456                     __func__);
2457                 return;
2458         }
2459
2460         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2461
2462         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2463             "%s: phy_info: channel=%d, flags=0x%08x\n",
2464             __func__,
2465             le16toh(phy_info->channel),
2466             le16toh(phy_info->phy_flags));
2467
2468         /*
2469          * Populate an RX state struct with the provided information.
2470          */
2471         bzero(&rxs, sizeof(rxs));
2472         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2473         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2474         rxs.c_ieee = le16toh(phy_info->channel);
2475         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
2476                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2477         } else {
2478                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2479         }
2480         rxs.rssi = rssi - sc->sc_noise;
2481         rxs.nf = sc->sc_noise;
2482
2483         if (ieee80211_radiotap_active_vap(vap)) {
2484                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2485
2486                 tap->wr_flags = 0;
2487                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2488                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2489                 tap->wr_chan_freq = htole16(rxs.c_freq);
2490                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2491                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2492                 tap->wr_dbm_antsignal = (int8_t)rssi;
2493                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2494                 tap->wr_tsft = phy_info->system_timestamp;
2495                 switch (phy_info->rate) {
2496                 /* CCK rates. */
2497                 case  10: tap->wr_rate =   2; break;
2498                 case  20: tap->wr_rate =   4; break;
2499                 case  55: tap->wr_rate =  11; break;
2500                 case 110: tap->wr_rate =  22; break;
2501                 /* OFDM rates. */
2502                 case 0xd: tap->wr_rate =  12; break;
2503                 case 0xf: tap->wr_rate =  18; break;
2504                 case 0x5: tap->wr_rate =  24; break;
2505                 case 0x7: tap->wr_rate =  36; break;
2506                 case 0x9: tap->wr_rate =  48; break;
2507                 case 0xb: tap->wr_rate =  72; break;
2508                 case 0x1: tap->wr_rate =  96; break;
2509                 case 0x3: tap->wr_rate = 108; break;
2510                 /* Unknown rate: should not happen. */
2511                 default:  tap->wr_rate =   0;
2512                 }
2513         }
2514
2515         IWM_UNLOCK(sc);
2516         if (ni != NULL) {
2517                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2518                 ieee80211_input_mimo(ni, m, &rxs);
2519                 ieee80211_free_node(ni);
2520         } else {
2521                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2522                 ieee80211_input_mimo_all(ic, m, &rxs);
2523         }
2524         IWM_LOCK(sc);
2525 }
2526
2527 static int
2528 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2529         struct iwm_node *in)
2530 {
2531         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2532         struct ieee80211_node *ni = &in->in_ni;
2533         struct ieee80211vap *vap = ni->ni_vap;
2534         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2535         int failack = tx_resp->failure_frame;
2536
2537         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2538
2539         /* Update rate control statistics. */
2540         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2541             __func__,
2542             (int) le16toh(tx_resp->status.status),
2543             (int) le16toh(tx_resp->status.sequence),
2544             tx_resp->frame_count,
2545             tx_resp->bt_kill_count,
2546             tx_resp->failure_rts,
2547             tx_resp->failure_frame,
2548             le32toh(tx_resp->initial_rate),
2549             (int) le16toh(tx_resp->wireless_media_time));
2550
2551         if (status != IWM_TX_STATUS_SUCCESS &&
2552             status != IWM_TX_STATUS_DIRECT_DONE) {
2553                 ieee80211_ratectl_tx_complete(vap, ni,
2554                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2555                 return (1);
2556         } else {
2557                 ieee80211_ratectl_tx_complete(vap, ni,
2558                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2559                 return (0);
2560         }
2561 }
2562
2563 static void
2564 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
2565         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2566 {
2567         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
2568         int idx = cmd_hdr->idx;
2569         int qid = cmd_hdr->qid;
2570         struct iwm_tx_ring *ring = &sc->txq[qid];
2571         struct iwm_tx_data *txd = &ring->data[idx];
2572         struct iwm_node *in = txd->in;
2573         struct mbuf *m = txd->m;
2574         int status;
2575
2576         KASSERT(txd->done == 0, ("txd not done"));
2577         KASSERT(txd->in != NULL, ("txd without node"));
2578         KASSERT(txd->m != NULL, ("txd without mbuf"));
2579
2580         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2581
2582         sc->sc_tx_timer = 0;
2583
2584         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
2585
2586         /* Unmap and free mbuf. */
2587         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
2588         bus_dmamap_unload(ring->data_dmat, txd->map);
2589
2590         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2591             "free txd %p, in %p\n", txd, txd->in);
2592         txd->done = 1;
2593         txd->m = NULL;
2594         txd->in = NULL;
2595
2596         ieee80211_tx_complete(&in->in_ni, m, status);
2597
2598         if (--ring->queued < IWM_TX_RING_LOMARK) {
2599                 sc->qfullmsk &= ~(1 << ring->qid);
2600                 if (sc->qfullmsk == 0) {
2601                         /*
2602                          * Well, we're in interrupt context, but then again
2603                          * I guess net80211 does all sorts of stunts in
2604                          * interrupt context, so maybe this is no biggie.
2605                          */
2606                         iwm_start(sc);
2607                 }
2608         }
2609 }
2610
2611 /*
2612  * transmit side
2613  */
2614
2615 /*
2616  * Process a "command done" firmware notification.  This is where we wakeup
2617  * processes waiting for a synchronous command completion.
2618  * from if_iwn
2619  */
2620 static void
2621 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2622 {
2623         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2624         struct iwm_tx_data *data;
2625
2626         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2627                 return; /* Not a command ack. */
2628         }
2629
2630         data = &ring->data[pkt->hdr.idx];
2631
2632         /* If the command was mapped in an mbuf, free it. */
2633         if (data->m != NULL) {
2634                 bus_dmamap_sync(ring->data_dmat, data->map,
2635                     BUS_DMASYNC_POSTWRITE);
2636                 bus_dmamap_unload(ring->data_dmat, data->map);
2637                 m_freem(data->m);
2638                 data->m = NULL;
2639         }
2640         wakeup(&ring->desc[pkt->hdr.idx]);
2641 }
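
/*
 * Illustrative sketch only (assumption, not the actual iwm_send_cmd()
 * code, which is elsewhere in this file): the synchronous sender is
 * expected to block on the same descriptor address that iwm_cmd_done()
 * wakes up, in the same style as the firmware-chunk wait in
 * iwm_firmware_load_chunk().  "done" and "idx" are placeholders here.
 */
#if 0
	while (!done) {
#if defined(__DragonFly__)
		error = iwmsleep(&ring->desc[idx], &sc->sc_lk, 0, "iwmcmd", hz);
#else
		error = msleep(&ring->desc[idx], &sc->sc_mtx, 0, "iwmcmd", hz);
#endif
		if (error)
			break;
	}
#endif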
2642
2643 #if 0
2644 /*
2645  * necessary only for block ack mode
2646  */
2647 void
2648 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
2649         uint16_t len)
2650 {
2651         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
2652         uint16_t w_val;
2653
2654         scd_bc_tbl = sc->sched_dma.vaddr;
2655
2656         len += 8; /* magic numbers came naturally from paris */
2657         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
2658                 len = roundup(len, 4) / 4;
2659
2660         w_val = htole16(sta_id << 12 | len);
2661
2662         /* Update TX scheduler. */
2663         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
2664         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2665             BUS_DMASYNC_PREWRITE);
2666
2667         /* I really wonder what this is ?!? */
2668         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
2669                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
2670                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2671                     BUS_DMASYNC_PREWRITE);
2672         }
2673 }
2674 #endif
2675
2676 /*
2677  * Take an 802.11 (non-n) rate, find the relevant rate
2678  * table entry.  return the index into in_ridx[].
2679  *
2680  * The caller then uses that index back into in_ridx
2681  * to figure out the rate index programmed /into/
2682  * the firmware for this given node.
2683  */
2684 static int
2685 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2686     uint8_t rate)
2687 {
2688         int i;
2689         uint8_t r;
2690
2691         for (i = 0; i < nitems(in->in_ridx); i++) {
2692                 r = iwm_rates[in->in_ridx[i]].rate;
2693                 if (rate == r)
2694                         return (i);
2695         }
2696         /* XXX Return the first */
2697         /* XXX TODO: have it return the /lowest/ */
2698         return (0);
2699 }
2700
2701 /*
2702  * Fill in the rate related information for a transmit command.
2703  */
2704 static const struct iwm_rate *
2705 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2706         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2707 {
2708         struct ieee80211com *ic = &sc->sc_ic;
2709         struct ieee80211_node *ni = &in->in_ni;
2710         const struct iwm_rate *rinfo;
2711         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2712         int ridx, rate_flags;
2713
2714         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2715         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2716
2717         /*
2718          * XXX TODO: everything about the rate selection here is terrible!
2719          */
2720
2721         if (type == IEEE80211_FC0_TYPE_DATA) {
2722                 int i;
2723                 /* for data frames, use RS table */
2724                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2725                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2726                 ridx = in->in_ridx[i];
2727
2728                 /* This is the index into the programmed table */
2729                 tx->initial_rate_index = i;
2730                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2731                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2732                     "%s: start with i=%d, txrate %d\n",
2733                     __func__, i, iwm_rates[ridx].rate);
2734         } else {
2735                 /*
2736                  * For non-data, use the lowest supported rate for the given
2737                  * operational mode.
2738                  *
2739                  * Note: there may not be any rate control information available.
2740                  * This driver currently assumes that if we're transmitting
2741                  * data frames, we use the rate control table.  Grr.
2742                  *
2743                  * XXX TODO: use the configured rate for the traffic type!
2744                  * XXX TODO: this should be per-vap, not curmode; as we later
2745                  * on we'll want to handle off-channel stuff (eg TDLS).
2746                  */
2747                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
2748                         /*
2749                          * XXX this assumes the mode is either 11a or not 11a;
2750                          * definitely won't work for 11n.
2751                          */
2752                         ridx = IWM_RIDX_OFDM;
2753                 } else {
2754                         ridx = IWM_RIDX_CCK;
2755                 }
2756         }
2757
2758         rinfo = &iwm_rates[ridx];
2759
2760         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2761             __func__, ridx,
2762             rinfo->rate,
2763             !! (IWM_RIDX_IS_CCK(ridx))
2764             );
2765
2766         /* XXX TODO: hard-coded TX antenna? */
2767         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2768         if (IWM_RIDX_IS_CCK(ridx))
2769                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
2770         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
2771
2772         return rinfo;
2773 }
2774
2775 #define TB0_SIZE 16
2776 static int
2777 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2778 {
2779         struct ieee80211com *ic = &sc->sc_ic;
2780         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2781         struct iwm_node *in = IWM_NODE(ni);
2782         struct iwm_tx_ring *ring;
2783         struct iwm_tx_data *data;
2784         struct iwm_tfd *desc;
2785         struct iwm_device_cmd *cmd;
2786         struct iwm_tx_cmd *tx;
2787         struct ieee80211_frame *wh;
2788         struct ieee80211_key *k = NULL;
2789         const struct iwm_rate *rinfo;
2790         uint32_t flags;
2791         u_int hdrlen;
2792         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
2793         int nsegs;
2794         uint8_t tid, type;
2795         int i, totlen, error, pad;
2796
2797         wh = mtod(m, struct ieee80211_frame *);
2798         hdrlen = ieee80211_anyhdrsize(wh);
2799         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2800         tid = 0;
2801         ring = &sc->txq[ac];
2802         desc = &ring->desc[ring->cur];
2803         memset(desc, 0, sizeof(*desc));
2804         data = &ring->data[ring->cur];
2805
2806         /* Fill out iwm_tx_cmd to send to the firmware */
2807         cmd = &ring->cmd[ring->cur];
2808         cmd->hdr.code = IWM_TX_CMD;
2809         cmd->hdr.flags = 0;
2810         cmd->hdr.qid = ring->qid;
2811         cmd->hdr.idx = ring->cur;
2812
2813         tx = (void *)cmd->data;
2814         memset(tx, 0, sizeof(*tx));
2815
2816         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
2817
2818         /* Encrypt the frame if need be. */
2819         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2820                 /* Retrieve key for TX && do software encryption. */
2821                 k = ieee80211_crypto_encap(ni, m);
2822                 if (k == NULL) {
2823                         m_freem(m);
2824                         return (ENOBUFS);
2825                 }
2826                 /* 802.11 header may have moved. */
2827                 wh = mtod(m, struct ieee80211_frame *);
2828         }
2829
2830         if (ieee80211_radiotap_active_vap(vap)) {
2831                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
2832
2833                 tap->wt_flags = 0;
2834                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2835                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2836                 tap->wt_rate = rinfo->rate;
2837                 if (k != NULL)
2838                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2839                 ieee80211_radiotap_tx(vap, m);
2840         }
2841
2842
2843         totlen = m->m_pkthdr.len;
2844
2845         flags = 0;
2846         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2847                 flags |= IWM_TX_CMD_FLG_ACK;
2848         }
2849
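             /*
              * Request RTS/CTS protection for unicast data frames larger
              * than the RTS threshold.
              */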
2850         if (type == IEEE80211_FC0_TYPE_DATA
2851             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
2852             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2853                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
2854         }
2855
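             /*
              * Non-data and multicast frames go out via the auxiliary
              * station; unicast data frames use the BSS station entry.
              */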
2856         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2857             type != IEEE80211_FC0_TYPE_DATA)
2858                 tx->sta_id = sc->sc_aux_sta.sta_id;
2859         else
2860                 tx->sta_id = IWM_STATION_ID;
2861
2862         if (type == IEEE80211_FC0_TYPE_MGT) {
2863                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2864
2865                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2866                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2867                         tx->pm_frame_timeout = htole16(3);
2868                 else
2869                         tx->pm_frame_timeout = htole16(2);
2870         } else {
2871                 tx->pm_frame_timeout = htole16(0);
2872         }
2873
2874         if (hdrlen & 3) {
2875                 /* First segment length must be a multiple of 4. */
2876                 flags |= IWM_TX_CMD_FLG_MH_PAD;
2877                 pad = 4 - (hdrlen & 3);
2878         } else
2879                 pad = 0;
2880
2881         tx->driver_txop = 0;
2882         tx->next_frame_len = 0;
2883
2884         tx->len = htole16(totlen);
2885         tx->tid_tspec = tid;
2886         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
2887
2888         /* Set physical address of "scratch area". */
2889         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
2890         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
2891
2892         /* Copy 802.11 header in TX command. */
2893         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2894
2895         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
2896
2897         tx->sec_ctl = 0;
2898         tx->tx_flags |= htole32(flags);
2899
2900         /* Trim 802.11 header. */
2901         m_adj(m, hdrlen);
2902 #if defined(__DragonFly__)
2903         error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
2904                                             segs, IWM_MAX_SCATTER - 2,
2905                                             &nsegs, BUS_DMA_NOWAIT);
2906 #else
2907         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2908             segs, &nsegs, BUS_DMA_NOWAIT);
2909 #endif
2910         if (error != 0) {
2911                 if (error != EFBIG) {
2912                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2913                             error);
2914                         m_freem(m);
2915                         return error;
2916                 }
2917                 /* Too many DMA segments, linearize mbuf. */
2918                 if (m_defrag(m, M_NOWAIT)) {
2919                         device_printf(sc->sc_dev,
2920                             "%s: could not defrag mbuf\n", __func__);
2921                         m_freem(m);
2922                         return (ENOBUFS);
2923                 }
2924
2925 #if defined(__DragonFly__)
2926                 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
2927                                                     segs, IWM_MAX_SCATTER - 2,
2928                                                     &nsegs, BUS_DMA_NOWAIT);
2929 #else
2930                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2931                     segs, &nsegs, BUS_DMA_NOWAIT);
2932 #endif
2933                 if (error != 0) {
2934                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2935                             error);
2936                         m_freem(m);
2937                         return error;
2938                 }
2939         }
2940         data->m = m;
2941         data->in = in;
2942         data->done = 0;
2943
2944         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2945             "sending txd %p, in %p\n", data, data->in);
2946         KASSERT(data->in != NULL, ("node is NULL"));
2947
2948         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2949             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
2950             ring->qid, ring->cur, totlen, nsegs,
2951             le32toh(tx->tx_flags),
2952             le32toh(tx->rate_n_flags),
2953             tx->initial_rate_index
2954             );
2955
2956         /* Fill TX descriptor. */
2957         desc->num_tbs = 2 + nsegs;
2958
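             /*
              * The first two buffers point at the TX command slot for this
              * ring entry: TB0 holds the start of the TX command, TB1 the
              * rest of the command plus the copied 802.11 header and pad.
              * Each hi_n_len field packs the upper DMA address bits into
              * its low 4 bits and the buffer length into the upper 12 bits.
              */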
2959         desc->tbs[0].lo = htole32(data->cmd_paddr);
2960         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2961             (TB0_SIZE << 4);
2962         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
2963         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2964             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
2965               + hdrlen + pad - TB0_SIZE) << 4);
2966
2967         /* Other DMA segments are for data payload. */
2968         for (i = 0; i < nsegs; i++) {
2969                 seg = &segs[i];
2970                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
2971                 desc->tbs[i+2].hi_n_len =
2972                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
2973                     | ((seg->ds_len) << 4);
2974         }
2975
2976         bus_dmamap_sync(ring->data_dmat, data->map,
2977             BUS_DMASYNC_PREWRITE);
2978         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
2979             BUS_DMASYNC_PREWRITE);
2980         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2981             BUS_DMASYNC_PREWRITE);
2982
2983 #if 0
2984         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
2985 #endif
2986
2987         /* Kick TX ring. */
2988         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
2989         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2990
2991         /* Mark TX ring as full if we reach a certain threshold. */
2992         if (++ring->queued > IWM_TX_RING_HIMARK) {
2993                 sc->qfullmsk |= 1 << ring->qid;
2994         }
2995
2996         return 0;
2997 }
2998
2999 static int
3000 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3001     const struct ieee80211_bpf_params *params)
3002 {
3003         struct ieee80211com *ic = ni->ni_ic;
3004         struct iwm_softc *sc = ic->ic_softc;
3005         int error = 0;
3006
3007         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3008             "->%s begin\n", __func__);
3009
3010         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3011                 m_freem(m);
3012                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3013                     "<-%s not RUNNING\n", __func__);
3014                 return (ENETDOWN);
3015         }
3016
3017         IWM_LOCK(sc);
3018         /* XXX fix this: the caller-supplied bpf params are currently ignored */
3019         if (params == NULL) {
3020                 error = iwm_tx(sc, m, ni, 0);
3021         } else {
3022                 error = iwm_tx(sc, m, ni, 0);
3023         }
3024         sc->sc_tx_timer = 5;
3025         IWM_UNLOCK(sc);
3026
3027         return (error);
3028 }
3029
3030 /*
3031  * mvm/tx.c
3032  */
3033
3034 #if 0
3035 /*
3036  * Note that there are transports that buffer frames before they reach
3037  * the firmware. This means that after flush_tx_path is called, the
3038  * queue might not be empty. The race-free way to handle this is to:
3039  * 1) set the station as draining
3040  * 2) flush the Tx path
3041  * 3) wait for the transport queues to be empty
3042  */
3043 int
3044 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3045 {
3046         struct iwm_tx_path_flush_cmd flush_cmd = {
3047                 .queues_ctl = htole32(tfd_msk),
3048                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3049         };
3050         int ret;
3051
3052         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3053             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3054             sizeof(flush_cmd), &flush_cmd);
3055         if (ret)
3056                 device_printf(sc->sc_dev,
3057                     "Flushing tx queue failed: %d\n", ret);
3058         return ret;
3059 }
3060 #endif
3061
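     /*
      * Convert a version-6 ADD_STA command into the older version-5
      * layout; used when the firmware does not advertise
      * IWM_UCODE_TLV_FLAGS_STA_KEY_CMD (see
      * iwm_mvm_send_add_sta_cmd_status() below).
      */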
3062 static void
3063 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
3064         struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
3065 {
3066         memset(cmd_v5, 0, sizeof(*cmd_v5));
3067
3068         cmd_v5->add_modify = cmd_v6->add_modify;
3069         cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
3070         cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
3071         IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
3072         cmd_v5->sta_id = cmd_v6->sta_id;
3073         cmd_v5->modify_mask = cmd_v6->modify_mask;
3074         cmd_v5->station_flags = cmd_v6->station_flags;
3075         cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
3076         cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
3077         cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
3078         cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
3079         cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
3080         cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
3081         cmd_v5->assoc_id = cmd_v6->assoc_id;
3082         cmd_v5->beamform_flags = cmd_v6->beamform_flags;
3083         cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
3084 }
3085
3086 static int
3087 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3088         struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3089 {
3090         struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3091
3092         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3093                 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3094                     sizeof(*cmd), cmd, status);
3095         }
3096
3097         iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3098
3099         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3100             &cmd_v5, status);
3101 }
3102
3103 /* send station add/update command to firmware */
3104 static int
3105 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3106 {
3107         struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3108         int ret;
3109         uint32_t status;
3110
3111         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3112
3113         add_sta_cmd.sta_id = IWM_STATION_ID;
3114         add_sta_cmd.mac_id_n_color
3115             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3116                 IWM_DEFAULT_COLOR));
3117         if (!update) {
3118                 add_sta_cmd.tfd_queue_msk = htole32(0xf);
3119                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3120         }
3121         add_sta_cmd.add_modify = update ? 1 : 0;
3122         add_sta_cmd.station_flags_msk
3123             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3124
3125         status = IWM_ADD_STA_SUCCESS;
3126         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3127         if (ret)
3128                 return ret;
3129
3130         switch (status) {
3131         case IWM_ADD_STA_SUCCESS:
3132                 break;
3133         default:
3134                 ret = EIO;
3135                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3136                 break;
3137         }
3138
3139         return ret;
3140 }
3141
3142 static int
3143 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3144 {
3145         int ret;
3146
3147         ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
3148         if (ret)
3149                 return ret;
3150
3151         return 0;
3152 }
3153
3154 static int
3155 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3156 {
3157         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3158 }
3159
3160 static int
3161 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3162         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3163 {
3164         struct iwm_mvm_add_sta_cmd_v6 cmd;
3165         int ret;
3166         uint32_t status;
3167
3168         memset(&cmd, 0, sizeof(cmd));
3169         cmd.sta_id = sta->sta_id;
3170         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3171
3172         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3173
3174         if (addr)
3175                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3176
3177         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3178         if (ret)
3179                 return ret;
3180
3181         switch (status) {
3182         case IWM_ADD_STA_SUCCESS:
3183                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3184                     "%s: Internal station added.\n", __func__);
3185                 return 0;
3186         default:
3187                 device_printf(sc->sc_dev,
3188                     "%s: Add internal station failed, status=0x%x\n",
3189                     __func__, status);
3190                 ret = EIO;
3191                 break;
3192         }
3193         return ret;
3194 }
3195
3196 static int
3197 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3198 {
3199         int ret;
3200
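             /* XXX hard-coded station ID for the auxiliary (scan) station */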
3201         sc->sc_aux_sta.sta_id = 3;
3202         sc->sc_aux_sta.tfd_queue_msk = 0;
3203
3204         ret = iwm_mvm_add_int_sta_common(sc,
3205             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3206
3207         if (ret)
3208                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3209         return ret;
3210 }
3211
3212 static int
3213 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3214 {
3215         struct iwm_time_quota_cmd cmd;
3216         int i, idx, ret, num_active_macs, quota, quota_rem;
3217         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3218         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3219         uint16_t id;
3220
3221         memset(&cmd, 0, sizeof(cmd));
3222
3223         /* currently, PHY ID == binding ID */
3224         if (in) {
3225                 id = in->in_phyctxt->id;
3226                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3227                 colors[id] = in->in_phyctxt->color;
3228
3229                 if (1)
3230                         n_ifs[id] = 1;
3231         }
3232
3233         /*
3234          * The FW's scheduling session consists of
3235          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3236          * equally between all the bindings that require quota.
3237          */
3238         num_active_macs = 0;
3239         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3240                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3241                 num_active_macs += n_ifs[i];
3242         }
3243
3244         quota = 0;
3245         quota_rem = 0;
3246         if (num_active_macs) {
3247                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3248                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3249         }
3250
3251         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3252                 if (colors[i] < 0)
3253                         continue;
3254
3255                 cmd.quotas[idx].id_and_color =
3256                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3257
3258                 if (n_ifs[i] <= 0) {
3259                         cmd.quotas[idx].quota = htole32(0);
3260                         cmd.quotas[idx].max_duration = htole32(0);
3261                 } else {
3262                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3263                         cmd.quotas[idx].max_duration = htole32(0);
3264                 }
3265                 idx++;
3266         }
3267
3268         /* Give the remainder of the session to the first binding */
3269         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3270
3271         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3272             sizeof(cmd), &cmd);
3273         if (ret)
3274                 device_printf(sc->sc_dev,
3275                     "%s: Failed to send quota: %d\n", __func__, ret);
3276         return ret;
3277 }
3278
3279 /*
3280  * ieee80211 routines
3281  */
3282
3283 /*
3284  * Change to AUTH state in 80211 state machine.  Roughly matches what
3285  * Linux does in bss_info_changed().
3286  */
3287 static int
3288 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3289 {
3290         struct ieee80211_node *ni;
3291         struct iwm_node *in;
3292         struct iwm_vap *iv = IWM_VAP(vap);
3293         uint32_t duration;
3294         int error;
3295
3296         /*
3297          * XXX I have a feeling that the vap node is being
3298          * freed from underneath us. Grr.
3299          */
3300         ni = ieee80211_ref_node(vap->iv_bss);
3301         in = IWM_NODE(ni);
3302         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3303             "%s: called; vap=%p, bss ni=%p\n",
3304             __func__,
3305             vap,
3306             ni);
3307
3308         in->in_assoc = 0;
3309
3310         error = iwm_allow_mcast(vap, sc);
3311         if (error) {
3312                 device_printf(sc->sc_dev,
3313                     "%s: failed to set multicast\n", __func__);
3314                 goto out;
3315         }
3316
3317         /*
3318          * This is where it deviates from what Linux does.
3319          *
3320          * Linux iwlwifi doesn't reset the NIC each time, nor does it
3321          * call ctxt_add() here.  Instead, it adds it during vap creation,
3322          * and always does a mac_ctx_changed().
3323          *
3324          * The openbsd port doesn't attempt to do that - it resets things
3325          * at odd states and does the add here.
3326          *
3327          * So, until the state handling is fixed (ie, we never reset
3328          * the NIC except for a firmware failure, which should drag
3329          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3330          * contexts that are required), let's do a dirty hack here.
3331          */
3332         if (iv->is_uploaded) {
3333                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3334                         device_printf(sc->sc_dev,
3335                             "%s: failed to update MAC\n", __func__);
3336                         goto out;
3337                 }
3338                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3339                     in->in_ni.ni_chan, 1, 1)) != 0) {
3340                         device_printf(sc->sc_dev,
3341                             "%s: failed update phy ctxt\n", __func__);
3342                         goto out;
3343                 }
3344                 in->in_phyctxt = &sc->sc_phyctxt[0];
3345
3346                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3347                         device_printf(sc->sc_dev,
3348                             "%s: binding update cmd\n", __func__);
3349                         goto out;
3350                 }
3351                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3352                         device_printf(sc->sc_dev,
3353                             "%s: failed to update sta\n", __func__);
3354                         goto out;
3355                 }
3356         } else {
3357                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3358                         device_printf(sc->sc_dev,
3359                             "%s: failed to add MAC\n", __func__);
3360                         goto out;
3361                 }
3362                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3363                     in->in_ni.ni_chan, 1, 1)) != 0) {
3364                         device_printf(sc->sc_dev,
3365                             "%s: failed add phy ctxt!\n", __func__);
3366                         error = ETIMEDOUT;
3367                         goto out;
3368                 }
3369                 in->in_phyctxt = &sc->sc_phyctxt[0];
3370
3371                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3372                         device_printf(sc->sc_dev,
3373                             "%s: binding add cmd\n", __func__);
3374                         goto out;
3375                 }
3376                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3377                         device_printf(sc->sc_dev,
3378                             "%s: failed to add sta\n", __func__);
3379                         goto out;
3380                 }
3381         }
3382
3383         /*
3384          * Prevent the FW from wandering off channel during association
3385          * by "protecting" the session with a time event.
3386          */
3387         /* XXX duration is in units of TU, not MS */
3388         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3389         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3390         DELAY(100);
3391
3392         error = 0;
3393 out:
3394         ieee80211_free_node(ni);
3395         return (error);
3396 }
3397
3398 static int
3399 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3400 {
3401         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3402         int error;
3403
3404         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3405                 device_printf(sc->sc_dev,
3406                     "%s: failed to update STA\n", __func__);
3407                 return error;
3408         }
3409
3410         in->in_assoc = 1;
3411         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3412                 device_printf(sc->sc_dev,
3413                     "%s: failed to update MAC\n", __func__);
3414                 return error;
3415         }
3416
3417         return 0;
3418 }
3419
3420 static int
3421 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3422 {
3423         /*
3424          * Ok, so *technically* the proper set of calls for going
3425          * from RUN back to SCAN is:
3426          *
3427          * iwm_mvm_power_mac_disable(sc, in);
3428          * iwm_mvm_mac_ctxt_changed(sc, in);
3429          * iwm_mvm_rm_sta(sc, in);
3430          * iwm_mvm_update_quotas(sc, NULL);
3431          * iwm_mvm_mac_ctxt_changed(sc, in);
3432          * iwm_mvm_binding_remove_vif(sc, in);
3433          * iwm_mvm_mac_ctxt_remove(sc, in);
3434          *
3435          * However, that freezes the device no matter which permutations
3436          * and modifications are attempted.  Obviously, this driver is missing
3437          * something since it works in the Linux driver, but figuring out what
3438          * is missing is a little more complicated.  Now, since we're going
3439          * back to nothing anyway, we'll just do a complete device reset.
3440          * Up yours, device!
3441          */
3442         //iwm_mvm_flush_tx_path(sc, 0xf, 1);
3443         iwm_stop_device(sc);
3444         iwm_init_hw(sc);
3445         if (in)
3446                 in->in_assoc = 0;
3447         return 0;
3448
3449 #if 0
3450         int error;
3451
3452         iwm_mvm_power_mac_disable(sc, in);
3453
3454         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3455                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3456                 return error;
3457         }
3458
3459         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3460                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3461                 return error;
3462         }
3463         error = iwm_mvm_rm_sta(sc, in);
3464         in->in_assoc = 0;
3465         iwm_mvm_update_quotas(sc, NULL);
3466         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3467                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3468                 return error;
3469         }
3470         iwm_mvm_binding_remove_vif(sc, in);
3471
3472         iwm_mvm_mac_ctxt_remove(sc, in);
3473
3474         return error;
3475 #endif
3476 }
3477
3478 static struct ieee80211_node *
3479 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3480 {
3481         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
3482             M_INTWAIT | M_ZERO);
3483 }
3484
3485 static void
3486 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3487 {
3488         struct ieee80211_node *ni = &in->in_ni;
3489         struct iwm_lq_cmd *lq = &in->in_lq;
3490         int nrates = ni->ni_rates.rs_nrates;
3491         int i, ridx, tab = 0;
3492         int txant = 0;
3493
3494         if (nrates > nitems(lq->rs_table)) {
3495                 device_printf(sc->sc_dev,
3496                     "%s: node supports %d rates, driver handles "
3497                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3498                 return;
3499         }
3500         if (nrates == 0) {
3501                 device_printf(sc->sc_dev,
3502                     "%s: node supports 0 rates, odd!\n", __func__);
3503                 return;
3504         }
3505
3506         /*
3507          * XXX .. and most of iwm_node is not initialised explicitly;
3508          * it's all just 0x0 passed to the firmware.
3509          */
3510
3511         /* first figure out which rates we should support */
3512         /* XXX TODO: this isn't 11n aware /at all/ */
3513         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3514         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3515             "%s: nrates=%d\n", __func__, nrates);
3516
3517         /*
3518          * Loop over nrates and populate in_ridx from the highest
3519          * rate to the lowest rate.  Remember, in_ridx[] has
3520          * IEEE80211_RATE_MAXSIZE entries!
3521          */
3522         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
3523                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
3524
3525                 /* Map 802.11 rate to HW rate index. */
3526                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
3527                         if (iwm_rates[ridx].rate == rate)
3528                                 break;
3529                 if (ridx > IWM_RIDX_MAX) {
3530                         device_printf(sc->sc_dev,
3531                             "%s: WARNING: device rate for %d not found!\n",
3532                             __func__, rate);
3533                 } else {
3534                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3535                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
3536                             __func__,
3537                             i,
3538                             rate,
3539                             ridx);
3540                         in->in_ridx[i] = ridx;
3541                 }
3542         }
3543
3544         /* then construct a lq_cmd based on those */
3545         memset(lq, 0, sizeof(*lq));
3546         lq->sta_id = IWM_STATION_ID;
3547
3548         /*
3549          * are these used? (we don't do SISO or MIMO)
3550          * need to set them to non-zero, though, or we get an error.
3551          */
3552         lq->single_stream_ant_msk = 1;
3553         lq->dual_stream_ant_msk = 1;
3554
3555         /*
3556          * Build the actual rate selection table.
3557          * The lowest bits are the rates.  Additionally,
3558          * CCK needs bit 9 to be set.  The rest of the bits
3559          * we add to the table select the tx antenna.
3560          * Note that we add the rates with the highest rate first
3561          * (opposite of ni_rates).
3562          */
3563         /*
3564          * XXX TODO: this should be looping over the min of nrates
3565          * and LQ_MAX_RETRY_NUM.  Sigh.
3566          */
3567         for (i = 0; i < nrates; i++) {
3568                 int nextant;
3569
3570                 if (txant == 0)
3571                         txant = IWM_FW_VALID_TX_ANT(sc);
3572                 nextant = 1<<(ffs(txant)-1);
3573                 txant &= ~nextant;
3574
3575                 /*
3576                  * Map the rate id into a rate index into
3577                  * our hardware table containing the
3578                  * configuration to use for this rate.
3579                  */
3580                 ridx = in->in_ridx[i];
3581                 tab = iwm_rates[ridx].plcp;
3582                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
3583                 if (IWM_RIDX_IS_CCK(ridx))
3584                         tab |= IWM_RATE_MCS_CCK_MSK;
3585                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3586                     "station rate i=%d, rate=%d, hw=%x\n",
3587                     i, iwm_rates[ridx].rate, tab);
3588                 lq->rs_table[i] = htole32(tab);
3589         }
3590         /* then fill the rest with the lowest possible rate */
3591         for (i = nrates; i < nitems(lq->rs_table); i++) {
3592                 KASSERT(tab != 0, ("invalid tab"));
3593                 lq->rs_table[i] = htole32(tab);
3594         }
3595 }
3596
3597 static int
3598 iwm_media_change(struct ifnet *ifp)
3599 {
3600         struct ieee80211vap *vap = ifp->if_softc;
3601         struct ieee80211com *ic = vap->iv_ic;
3602         struct iwm_softc *sc = ic->ic_softc;
3603         int error;
3604
3605         error = ieee80211_media_change(ifp);
3606         if (error != ENETRESET)
3607                 return error;
3608
3609         IWM_LOCK(sc);
3610         if (ic->ic_nrunning > 0) {
3611                 iwm_stop(sc);
3612                 iwm_init(sc);
3613         }
3614         IWM_UNLOCK(sc);
3615         return error;
3616 }
3617
3618
3619 static int
3620 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
3621 {
3622         struct iwm_vap *ivp = IWM_VAP(vap);
3623         struct ieee80211com *ic = vap->iv_ic;
3624         struct iwm_softc *sc = ic->ic_softc;
3625         struct iwm_node *in;
3626         int error;
3627
3628         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3629             "switching state %s -> %s\n",
3630             ieee80211_state_name[vap->iv_state],
3631             ieee80211_state_name[nstate]);
3632         IEEE80211_UNLOCK(ic);
3633         IWM_LOCK(sc);
3634
3635         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
3636                 iwm_led_blink_stop(sc);
3637
3638         /* disable beacon filtering if we're hopping out of RUN */
3639         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
3640                 iwm_mvm_disable_beacon_filter(sc);
3641
3642                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
3643                         in->in_assoc = 0;
3644
3645                 iwm_release(sc, NULL);
3646
3647                 /*
3648                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
3649                  * above then the card will be completely reinitialized,
3650                  * so the driver must do everything necessary to bring the card
3651                  * from INIT to SCAN.
3652                  *
3653                  * Additionally, upon receiving deauth frame from AP,
3654                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
3655                  * state. This will also fail with this driver, so bring the FSM
3656                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
3657                  *
3658                  * XXX TODO: fix this for FreeBSD!
3659                  */
3660                 if (nstate == IEEE80211_S_SCAN ||
3661                     nstate == IEEE80211_S_AUTH ||
3662                     nstate == IEEE80211_S_ASSOC) {
3663                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3664                             "Force transition to INIT; MGT=%d\n", arg);
3665                         IWM_UNLOCK(sc);
3666                         IEEE80211_LOCK(ic);
3667                         vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
3668                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3669                             "Going INIT->SCAN\n");
3670                         nstate = IEEE80211_S_SCAN;
3671                         IEEE80211_UNLOCK(ic);
3672                         IWM_LOCK(sc);
3673                 }
3674         }
3675
3676         switch (nstate) {
3677         case IEEE80211_S_INIT:
3678                 sc->sc_scanband = 0;
3679                 break;
3680
3681         case IEEE80211_S_AUTH:
3682                 if ((error = iwm_auth(vap, sc)) != 0) {
3683                         device_printf(sc->sc_dev,
3684                             "%s: could not move to auth state: %d\n",
3685                             __func__, error);
3686                         break;
3687                 }
3688                 break;
3689
3690         case IEEE80211_S_ASSOC:
3691                 if ((error = iwm_assoc(vap, sc)) != 0) {
3692                         device_printf(sc->sc_dev,
3693                             "%s: failed to associate: %d\n", __func__,
3694                             error);
3695                         break;
3696                 }
3697                 break;
3698
3699         case IEEE80211_S_RUN:
3700         {
3701                 struct iwm_host_cmd cmd = {
3702                         .id = IWM_LQ_CMD,
3703                         .len = { sizeof(in->in_lq), },
3704                         .flags = IWM_CMD_SYNC,
3705                 };
3706
3707                 /* Update the association state, now we have it all */
3708                 /* (eg associd comes in at this point) */
3709                 error = iwm_assoc(vap, sc);
3710                 if (error != 0) {
3711                         device_printf(sc->sc_dev,
3712                             "%s: failed to update association state: %d\n",
3713                             __func__,
3714                             error);
3715                         break;
3716                 }
3717
3718                 in = IWM_NODE(vap->iv_bss);
3719                 iwm_mvm_power_mac_update_mode(sc, in);
3720                 iwm_mvm_enable_beacon_filter(sc, in);
3721                 iwm_mvm_update_quotas(sc, in);
3722                 iwm_setrates(sc, in);
3723
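                     /*
                      * Push the link quality (rate selection) table built by
                      * iwm_setrates() above to the firmware.
                      */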
3724                 cmd.data[0] = &in->in_lq;
3725                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
3726                         device_printf(sc->sc_dev,
3727                             "%s: IWM_LQ_CMD failed\n", __func__);
3728                 }
3729
3730                 iwm_mvm_led_enable(sc);
3731                 break;
3732         }
3733
3734         default:
3735                 break;
3736         }
3737         IWM_UNLOCK(sc);
3738         IEEE80211_LOCK(ic);
3739
3740         return (ivp->iv_newstate(vap, nstate, arg));
3741 }
3742
3743 void
3744 iwm_endscan_cb(void *arg, int pending)
3745 {
3746         struct iwm_softc *sc = arg;
3747         struct ieee80211com *ic = &sc->sc_ic;
3748         int done;
3749         int error;
3750
3751         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
3752             "%s: scan ended\n",
3753             __func__);
3754
3755         IWM_LOCK(sc);
3756         if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
3757             sc->sc_nvm.sku_cap_band_52GHz_enable) {
3758                 done = 0;
3759                 if ((error = iwm_mvm_scan_request(sc,
3760                     IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
3761                         device_printf(sc->sc_dev, "could not initiate scan\n");
3762                         done = 1;
3763                 }
3764         } else {
3765                 done = 1;
3766         }
3767
3768         if (done) {
3769                 IWM_UNLOCK(sc);
3770                 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
3771                 IWM_LOCK(sc);
3772                 sc->sc_scanband = 0;
3773         }
3774         IWM_UNLOCK(sc);
3775 }
3776
3777 static int
3778 iwm_init_hw(struct iwm_softc *sc)
3779 {
3780         struct ieee80211com *ic = &sc->sc_ic;
3781         int error, i, qid;
3782
3783         if ((error = iwm_start_hw(sc)) != 0) {
3784                 kprintf("iwm_start_hw: failed %d\n", error);
3785                 return error;
3786         }
3787
3788         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
3789                 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
3790                 return error;
3791         }
3792
3793         /*
3794          * We should stop and restart the HW since the INIT
3795          * image has just been loaded.
3796          */
3797         iwm_stop_device(sc);
3798         if ((error = iwm_start_hw(sc)) != 0) {
3799                 device_printf(sc->sc_dev, "could not initialize hardware\n");
3800                 return error;
3801         }
3802
3803         /* omstart, this time with the regular firmware */
3804         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
3805         if (error) {
3806                 device_printf(sc->sc_dev, "could not load firmware\n");
3807                 goto error;
3808         }
3809
3810         if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
3811                 device_printf(sc->sc_dev, "antenna config failed\n");
3812                 goto error;
3813         }
3814
3815         /* Send phy db control command and then phy db calibration */
3816         if ((error = iwm_send_phy_db_data(sc)) != 0) {
3817                 device_printf(sc->sc_dev, "phy_db_data failed\n");
3818                 goto error;
3819         }
3820
3821         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
3822                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
3823                 goto error;
3824         }
3825
3826         /* Add auxiliary station for scanning */
3827         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
3828                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
3829                 goto error;
3830         }
3831
3832         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
3833                 /*
3834                  * The channel used here isn't relevant as it's
3835                  * going to be overwritten in the other flows.
3836                  * For now use the first channel we have.
3837                  */
3838                 if ((error = iwm_mvm_phy_ctxt_add(sc,
3839                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
3840                         goto error;
3841         }
3842
3843         error = iwm_mvm_power_update_device(sc);
3844         if (error)
3845                 goto error;
3846
3847         /* Mark TX rings as active. */
3848         for (qid = 0; qid < 4; qid++) {
3849                 iwm_enable_txq(sc, qid, qid);
3850         }
3851
3852         return 0;
3853
3854  error:
3855         iwm_stop_device(sc);
3856         return error;
3857 }
3858
3859 /* Allow multicast from our BSSID. */
3860 static int
3861 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3862 {
3863         struct ieee80211_node *ni = vap->iv_bss;
3864         struct iwm_mcast_filter_cmd *cmd;
3865         size_t size;
3866         int error;
3867
3868         size = roundup(sizeof(*cmd), 4);
3869         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
3870         if (cmd == NULL)
3871                 return ENOMEM;
3872         cmd->filter_own = 1;
3873         cmd->port_id = 0;
3874         cmd->count = 0;
3875         cmd->pass_all = 1;
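             /* With pass_all set, the filter currently accepts all multicast frames. */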
3876         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3877
3878         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3879             IWM_CMD_SYNC, size, cmd);
3880         kfree(cmd, M_DEVBUF);
3881
3882         return (error);
3883 }
3884
3885 /*
3886  * ifnet interfaces
3887  */
3888
3889 static void
3890 iwm_init(struct iwm_softc *sc)
3891 {
3892         int error;
3893
3894         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3895                 return;
3896         }
3897         sc->sc_generation++;
3898         sc->sc_flags &= ~IWM_FLAG_STOPPED;
3899
3900         if ((error = iwm_init_hw(sc)) != 0) {
3901                 kprintf("iwm_init_hw failed %d\n", error);
3902                 iwm_stop(sc);
3903                 return;
3904         }
3905
3906         /*
3907          * Ok, firmware loaded and we are jogging
3908          */
3909         sc->sc_flags |= IWM_FLAG_HW_INITED;
3910         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3911 }
3912
3913 static int
3914 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3915 {
3916         struct iwm_softc *sc;
3917         int error;
3918
3919         sc = ic->ic_softc;
3920
3921         IWM_LOCK(sc);
3922         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3923                 IWM_UNLOCK(sc);
3924                 return (ENXIO);
3925         }
3926         error = mbufq_enqueue(&sc->sc_snd, m);
3927         if (error) {
3928                 IWM_UNLOCK(sc);
3929                 return (error);
3930         }
3931         iwm_start(sc);
3932         IWM_UNLOCK(sc);
3933         return (0);
3934 }
3935
3936 /*
3937  * Dequeue packets from sendq and call send.
3938  */
3939 static void
3940 iwm_start(struct iwm_softc *sc)
3941 {
3942         struct ieee80211_node *ni;
3943         struct mbuf *m;
3944         int ac = 0;
3945
3946         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3947         while (sc->qfullmsk == 0 &&
3948                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3949                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3950                 if (iwm_tx(sc, m, ni, ac) != 0) {
3951                         if_inc_counter(ni->ni_vap->iv_ifp,
3952                             IFCOUNTER_OERRORS, 1);
3953                         ieee80211_free_node(ni);
3954                         continue;
3955                 }
3956                 sc->sc_tx_timer = 15;
3957         }
3958         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3959 }
3960
3961 static void
3962 iwm_stop(struct iwm_softc *sc)
3963 {
3964
3965         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
3966         sc->sc_flags |= IWM_FLAG_STOPPED;
3967         sc->sc_generation++;
3968         sc->sc_scanband = 0;
3969         iwm_led_blink_stop(sc);
3970         sc->sc_tx_timer = 0;
3971         iwm_stop_device(sc);
3972 }
3973
3974 static void
3975 iwm_watchdog(void *arg)
3976 {
3977         struct iwm_softc *sc = arg;
3978
3979         if (sc->sc_tx_timer > 0) {
3980                 if (--sc->sc_tx_timer == 0) {
3981                         device_printf(sc->sc_dev, "device timeout\n");
3982 #ifdef IWM_DEBUG
3983                         iwm_nic_error(sc);
3984 #endif
3985                         iwm_stop(sc);
3986 #if defined(__DragonFly__)
3987                         ++sc->sc_ic.ic_oerrors;
3988 #else
3989                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
3990 #endif
3991                         return;
3992                 }
3993         }
3994         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3995 }
3996
3997 static void
3998 iwm_parent(struct ieee80211com *ic)
3999 {
4000         struct iwm_softc *sc = ic->ic_softc;
4001         int startall = 0;
4002
4003         IWM_LOCK(sc);
4004         if (ic->ic_nrunning > 0) {
4005                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4006                         iwm_init(sc);
4007                         startall = 1;
4008                 }
4009         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4010                 iwm_stop(sc);
4011         IWM_UNLOCK(sc);
4012         if (startall)
4013                 ieee80211_start_all(ic);
4014 }
4015
4016 /*
4017  * The interrupt side of things
4018  */
4019
4020 /*
4021  * error dumping routines are from iwlwifi/mvm/utils.c
4022  */
4023
4024 /*
4025  * Note: This structure is read from the device with IO accesses,
4026  * and the reading already does the endian conversion. As it is
4027  * read with uint32_t-sized accesses, any members with a different size
4028  * need to be ordered correctly though!
4029  */
4030 struct iwm_error_event_table {
4031         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4032         uint32_t error_id;              /* type of error */
4033         uint32_t pc;                    /* program counter */
4034         uint32_t blink1;                /* branch link */
4035         uint32_t blink2;                /* branch link */
4036         uint32_t ilink1;                /* interrupt link */
4037         uint32_t ilink2;                /* interrupt link */
4038         uint32_t data1;         /* error-specific data */
4039         uint32_t data2;         /* error-specific data */
4040         uint32_t data3;         /* error-specific data */
4041         uint32_t bcon_time;             /* beacon timer */
4042         uint32_t tsf_low;               /* network timestamp function timer */
4043         uint32_t tsf_hi;                /* network timestamp function timer */
4044         uint32_t gp1;           /* GP1 timer register */
4045         uint32_t gp2;           /* GP2 timer register */
4046         uint32_t gp3;           /* GP3 timer register */
4047         uint32_t ucode_ver;             /* uCode version */
4048         uint32_t hw_ver;                /* HW Silicon version */
4049         uint32_t brd_ver;               /* HW board version */
4050         uint32_t log_pc;                /* log program counter */
4051         uint32_t frame_ptr;             /* frame pointer */
4052         uint32_t stack_ptr;             /* stack pointer */
4053         uint32_t hcmd;          /* last host command header */
4054         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4055                                  * rxtx_flag */
4056         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4057                                  * host_flag */
4058         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4059                                  * enc_flag */
4060         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4061                                  * time_flag */
4062         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4063                                  * wico interrupt */
4064         uint32_t isr_pref;              /* isr status register LMPM_NIC_PREF_STAT */
4065         uint32_t wait_event;            /* wait event() caller address */
4066         uint32_t l2p_control;   /* L2pControlField */
4067         uint32_t l2p_duration;  /* L2pDurationField */
4068         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4069         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4070         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
4071                                  * (LMPM_PMG_SEL) */
4072         uint32_t u_timestamp;   /* indicates the date and time of the
4073                                  * compilation */
4074         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4075 } __packed;
4076
4077 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4078 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4079
4080 #ifdef IWM_DEBUG
4081 struct {
4082         const char *name;
4083         uint8_t num;
4084 } advanced_lookup[] = {
4085         { "NMI_INTERRUPT_WDG", 0x34 },
4086         { "SYSASSERT", 0x35 },
4087         { "UCODE_VERSION_MISMATCH", 0x37 },
4088         { "BAD_COMMAND", 0x38 },
4089         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4090         { "FATAL_ERROR", 0x3D },
4091         { "NMI_TRM_HW_ERR", 0x46 },
4092         { "NMI_INTERRUPT_TRM", 0x4C },
4093         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4094         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4095         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4096         { "NMI_INTERRUPT_HOST", 0x66 },
4097         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4098         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4099         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4100         { "ADVANCED_SYSASSERT", 0 },
4101 };
4102
4103 static const char *
4104 iwm_desc_lookup(uint32_t num)
4105 {
4106         int i;
4107
4108         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4109                 if (advanced_lookup[i].num == num)
4110                         return advanced_lookup[i].name;
4111
4112         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4113         return advanced_lookup[i].name;
4114 }
4115
4116 /*
4117  * Support for dumping the error log seemed like a good idea ...
4118  * but it's mostly hex junk and the only sensible thing is the
4119  * hw/ucode revision (which we know anyway).  Since it's here,
4120  * I'll just leave it in, just in case e.g. the Intel guys want to
4121  * help us decipher some "ADVANCED_SYSASSERT" later.
4122  */
4123 static void
4124 iwm_nic_error(struct iwm_softc *sc)
4125 {
4126         struct iwm_error_event_table table;
4127         uint32_t base;
4128
4129         device_printf(sc->sc_dev, "dumping device error log\n");
4130         base = sc->sc_uc.uc_error_event_table;
4131         if (base < 0x800000 || base >= 0x80C000) {
4132                 device_printf(sc->sc_dev,
4133                     "Not valid error log pointer 0x%08x\n", base);
4134                 return;
4135         }
4136
4137         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
4138                 device_printf(sc->sc_dev, "reading errlog failed\n");
4139                 return;
4140         }
4141
4142         if (!table.valid) {
4143                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4144                 return;
4145         }
4146
4147         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4148                 device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
4149                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4150                     sc->sc_flags, table.valid);
4151         }
4152
4153         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4154                 iwm_desc_lookup(table.error_id));
4155         device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
4156         device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
4157         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4158         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4159         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4160         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4161         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4162         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4163         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4164         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4165         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4166         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4167         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4168         device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
4169         device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
4170         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4171         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4172         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4173         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4174         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4175         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4176         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4177         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4178         device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
4179         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4180         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4181         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4182         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4183         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4184         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4185         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4186         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
4187 }
4188 #endif
4189
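     /*
      * Sync the RX buffer for CPU access and point the given pointer at
      * the payload that follows the iwm_rx_packet header.
      */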
4190 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
4191 do {                                                                    \
4192         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4193         _var_ = (void *)((_pkt_)+1);                                    \
4194 } while (/*CONSTCOND*/0)
4195
4196 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
4197 do {                                                                    \
4198         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4199         _ptr_ = (void *)((_pkt_)+1);                                    \
4200 } while (/*CONSTCOND*/0)
4201
4202 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4203
4204 /*
4205  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4206  * Basic structure from if_iwn
4207  */
4208 static void
4209 iwm_notif_intr(struct iwm_softc *sc)
4210 {
4211         uint16_t hw;
4212
4213         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
4214             BUS_DMASYNC_POSTREAD);
4215
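             /*
              * closed_rb_num is the index of the most recently filled RX
              * buffer, as reported by the firmware in the ring status
              * (a 12-bit field, hence the 0xfff mask).  Process entries
              * until our read index catches up with it.
              */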
4216         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
4217
4218         /*
4219          * Process responses
4220          */
4221         while (sc->rxq.cur != hw) {
4222                 struct iwm_rx_ring *ring = &sc->rxq;
4223                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
4224                 struct iwm_rx_packet *pkt;
4225                 struct iwm_cmd_response *cresp;
4226                 int qid, idx;
4227
4228                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4229                     BUS_DMASYNC_POSTREAD);
4230                 pkt = mtod(data->m, struct iwm_rx_packet *);
4231
4232                 qid = pkt->hdr.qid & ~0x80;
4233                 idx = pkt->hdr.idx;
4234
4235                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4236                     "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
4237                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
4238                     pkt->hdr.code, sc->rxq.cur, hw);
4239
4240                 /*
4241                  * We randomly get these from the firmware, no idea why.
4242                  * They at least seem harmless, so just ignore them for now.
4243                  */
4244                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
4245                     || pkt->len_n_flags == htole32(0x55550000))) {
4246                         ADVANCE_RXQ(sc);
4247                         continue;
4248                 }
4249
4250                 switch (pkt->hdr.code) {
4251                 case IWM_REPLY_RX_PHY_CMD:
4252                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
4253                         break;
4254
4255                 case IWM_REPLY_RX_MPDU_CMD:
4256                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
4257                         break;
4258
4259                 case IWM_TX_CMD:
4260                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
4261                         break;
4262
4263                 case IWM_MISSED_BEACONS_NOTIFICATION: {
4264                         struct iwm_missed_beacons_notif *resp;
4265                         int missed;
4266
4267                         /* XXX look at mac_id to determine interface ID */
4268                         struct ieee80211com *ic = &sc->sc_ic;
4269                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4270
4271                         SYNC_RESP_STRUCT(resp, pkt);
4272                         missed = le32toh(resp->consec_missed_beacons);
4273
4274                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
4275                             "%s: MISSED_BEACON: mac_id=%d, "
4276                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
4277                             "num_rx=%d\n",
4278                             __func__,
4279                             le32toh(resp->mac_id),
4280                             le32toh(resp->consec_missed_beacons_since_last_rx),
4281                             le32toh(resp->consec_missed_beacons),
4282                             le32toh(resp->num_expected_beacons),
4283                             le32toh(resp->num_recvd_beacons));
4284
4285                         /* Be paranoid */
4286                         if (vap == NULL)
4287                                 break;
4288
4289                         /* XXX no net80211 locking? */
4290                         if (vap->iv_state == IEEE80211_S_RUN &&
4291                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
4292                                 if (missed > vap->iv_bmissthreshold) {
4293                                         /* XXX bad locking; turn into task */
4294                                         IWM_UNLOCK(sc);
4295                                         ieee80211_beacon_miss(ic);
4296                                         IWM_LOCK(sc);
4297                                 }
4298                         }
4299
4300                         break; }
4301
4302                 case IWM_MVM_ALIVE: {
4303                         struct iwm_mvm_alive_resp *resp;
4304                         SYNC_RESP_STRUCT(resp, pkt);
4305
4306                         sc->sc_uc.uc_error_event_table
4307                             = le32toh(resp->error_event_table_ptr);
4308                         sc->sc_uc.uc_log_event_table
4309                             = le32toh(resp->log_event_table_ptr);
4310                         sc->sched_base = le32toh(resp->scd_base_ptr);
4311                         sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
4312
4313                         sc->sc_uc.uc_intr = 1;
4314                         wakeup(&sc->sc_uc);
4315                         break; }
4316
4317                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
4318                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
4319                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
4320
4321                         iwm_phy_db_set_section(sc, phy_db_notif);
4322
4323                         break; }
4324
4325                 case IWM_STATISTICS_NOTIFICATION: {
4326                         struct iwm_notif_statistics *stats;
4327                         SYNC_RESP_STRUCT(stats, pkt);
4328                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
4329                         sc->sc_noise = iwm_get_noise(&stats->rx.general);
4330                         break; }
4331
4332                 case IWM_NVM_ACCESS_CMD:
4333                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
4334                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4335                                     BUS_DMASYNC_POSTREAD);
4336                                 memcpy(sc->sc_cmd_resp,
4337                                     pkt, sizeof(sc->sc_cmd_resp));
4338                         }
4339                         break;
4340
4341                 case IWM_PHY_CONFIGURATION_CMD:
4342                 case IWM_TX_ANT_CONFIGURATION_CMD:
4343                 case IWM_ADD_STA:
4344                 case IWM_MAC_CONTEXT_CMD:
4345                 case IWM_REPLY_SF_CFG_CMD:
4346                 case IWM_POWER_TABLE_CMD:
4347                 case IWM_PHY_CONTEXT_CMD:
4348                 case IWM_BINDING_CONTEXT_CMD:
4349                 case IWM_TIME_EVENT_CMD:
4350                 case IWM_SCAN_REQUEST_CMD:
4351                 case IWM_REPLY_BEACON_FILTERING_CMD:
4352                 case IWM_MAC_PM_POWER_TABLE:
4353                 case IWM_TIME_QUOTA_CMD:
4354                 case IWM_REMOVE_STA:
4355                 case IWM_TXPATH_FLUSH:
4356                 case IWM_LQ_CMD:
4357                         SYNC_RESP_STRUCT(cresp, pkt);
4358                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
4359                                 memcpy(sc->sc_cmd_resp,
4360                                     pkt, sizeof(*pkt)+sizeof(*cresp));
4361                         }
4362                         break;
4363
4364                 /* ignore */
4365                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
4366                         break;
4367
4368                 case IWM_INIT_COMPLETE_NOTIF:
4369                         sc->sc_init_complete = 1;
4370                         wakeup(&sc->sc_init_complete);
4371                         break;
4372
4373                 case IWM_SCAN_COMPLETE_NOTIFICATION: {
4374                         struct iwm_scan_complete_notif *notif;
4375                         SYNC_RESP_STRUCT(notif, pkt);
4376                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
4377                         break; }
4378
4379                 case IWM_REPLY_ERROR: {
4380                         struct iwm_error_resp *resp;
4381                         SYNC_RESP_STRUCT(resp, pkt);
4382
4383                         device_printf(sc->sc_dev,
4384                             "firmware error 0x%x, cmd 0x%x\n",
4385                             le32toh(resp->error_type),
4386                             resp->cmd_id);
4387                         break; }
4388
4389                 case IWM_TIME_EVENT_NOTIFICATION: {
4390                         struct iwm_time_event_notif *notif;
4391                         SYNC_RESP_STRUCT(notif, pkt);
4392
4393                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4394                             "TE notif status = 0x%x action = 0x%x\n",
4395                             notif->status, notif->action);
4396                         break; }
4397
4398                 case IWM_MCAST_FILTER_CMD:
4399                         break;
4400
4401                 default:
4402                         device_printf(sc->sc_dev,
4403                             "frame %d/%d %x UNHANDLED (this should "
4404                             "not happen)\n", qid, idx,
4405                             pkt->len_n_flags);
4406                         break;
4407                 }
4408
4409                 /*
4410                  * Why test bit 0x80?  The Linux driver:
4411                  *
4412                  * There is one exception:  uCode sets bit 15 when it
4413                  * originates the response/notification, i.e. when the
4414                  * response/notification is not a direct response to a
4415                  * command sent by the driver.  For example, uCode issues
4416                  * IWM_REPLY_RX when it sends a received frame to the driver;
4417                  * it is not a direct response to any driver command.
4418                  *
4419                  * Ok, so since when is 7 == 15?  Well, the Linux driver
4420                  * uses a slightly different format for pkt->hdr, and "qid"
4421                  * is actually the upper byte of a two-byte field.
4422                  */
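                     /*
                      * Illustration (hypothetical values): a Linux sequence
                      * of 0x8abc lands here as hdr.idx = 0xbc and
                      * hdr.qid = 0x8a, so Linux's bit 15 is our bit 7 of
                      * hdr.qid.
                      */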
4423                 if (!(pkt->hdr.qid & (1 << 7))) {
4424                         iwm_cmd_done(sc, pkt);
4425                 }
4426
4427                 ADVANCE_RXQ(sc);
4428         }
4429
4430         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
4431             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4432
4433         /*
4434          * Tell the firmware what we have processed.
4435          * Seems like the hardware gets upset unless we align
4436          * the write by 8??
4437          */
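             /*
              * (The Linux reference driver similarly rounds the RX write
              * pointer down to a multiple of 8 before writing the FH
              * register, so this appears to mirror the reference behaviour
              * rather than being a local quirk.)
              */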
4438         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
4439         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
4440 }
4441
4442 static void
4443 iwm_intr(void *arg)
4444 {
4445         struct iwm_softc *sc = arg;
4446         int handled = 0;
4447         int r1, r2, rv = 0;
4448         int isperiodic = 0;
4449
4450 #if defined(__DragonFly__)
4451         if (sc->sc_mem == NULL) {
4452                 kprintf("iwm_intr: detached\n");
4453                 return;
4454         }
4455 #endif
4456         IWM_LOCK(sc);
4457         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
4458
4459         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
4460                 uint32_t *ict = sc->ict_dma.vaddr;
4461                 int tmp;
4462
4463                 tmp = htole32(ict[sc->ict_cur]);
4464                 if (!tmp)
4465                         goto out_ena;
4466
4467                 /*
4468                  * ok, there was something.  keep plowing until we have all.
4469                  */
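                     /*
                      * The ICT table (allocated in iwm_alloc_ict()) is a DMA
                      * area into which the device writes one 32-bit interrupt
                      * cause word per event; OR the entries together and zero
                      * them until a zero terminator is reached.
                      */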
4470                 r1 = r2 = 0;
4471                 while (tmp) {
4472                         r1 |= tmp;
4473                         ict[sc->ict_cur] = 0;
4474                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
4475                         tmp = htole32(ict[sc->ict_cur]);
4476                 }
4477
4478                 /* this is where the fun begins.  don't ask */
4479                 if (r1 == 0xffffffff)
4480                         r1 = 0;
4481
4482                 /* i am not expected to understand this */
4483                 if (r1 & 0xc0000)
4484                         r1 |= 0x8000;
4485                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
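                     /*
                      * The conversion above rebuilds a CSR_INT-style value
                      * from the compressed ICT format: the low byte maps to
                      * CSR bits 0-7, the next byte to bits 24-31.  Per the
                      * Linux reference driver, the 0xc0000 -> 0x8000 step
                      * works around a hardware bug that can clear the RX bit
                      * when interrupt coalescing is used.
                      */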
4486         } else {
4487                 r1 = IWM_READ(sc, IWM_CSR_INT);
4488                 /* "hardware gone" (where, fishing?) */
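                     /*
                      * All-ones, or the 0xa5a5a5ax fill pattern, from a
                      * register read generally means the device is no longer
                      * responding on the bus (e.g. surprise removal or a
                      * wedged adapter).
                      */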
4489                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
4490                         goto out;
4491                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
4492         }
4493         if (r1 == 0 && r2 == 0) {
4494                 goto out_ena;
4495         }
4496
4497         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
4498
4499         /* ignored */
4500         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
4501
4502         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
4503                 int i;
4504                 struct ieee80211com *ic = &sc->sc_ic;
4505                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4506
4507 #ifdef IWM_DEBUG
4508                 iwm_nic_error(sc);
4509 #endif
4510                 /* Dump driver status (TX and RX rings) while we're here. */
4511                 device_printf(sc->sc_dev, "driver status:\n");
4512                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
4513                         struct iwm_tx_ring *ring = &sc->txq[i];
4514                         device_printf(sc->sc_dev,
4515                             "  tx ring %2d: qid=%-2d cur=%-3d "
4516                             "queued=%-3d\n",
4517                             i, ring->qid, ring->cur, ring->queued);
4518                 }
4519                 device_printf(sc->sc_dev,
4520                     "  rx ring: cur=%d\n", sc->rxq.cur);
4521                 device_printf(sc->sc_dev,
4522                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
4523
4524                 /* Don't stop the device; just do a VAP restart */
4525                 IWM_UNLOCK(sc);
4526
4527                 if (vap == NULL) {
4528                         kprintf("%s: null vap\n", __func__);
4529                         return;
4530                 }
4531
4532                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
4533                     "restarting\n", __func__, vap->iv_state);
4534
4535                 /* XXX TODO: turn this into a callout/taskqueue */
4536                 ieee80211_restart_all(ic);
4537                 return;
4538         }
4539
4540         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
4541                 handled |= IWM_CSR_INT_BIT_HW_ERR;
4542                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
4543                 iwm_stop(sc);
4544                 rv = 1;
4545                 goto out;
4546         }
4547
4548         /* firmware chunk loaded */
4549         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
4550                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
4551                 handled |= IWM_CSR_INT_BIT_FH_TX;
4552                 sc->sc_fw_chunk_done = 1;
4553                 wakeup(&sc->sc_fw);
4554         }
4555
4556         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
4557                 handled |= IWM_CSR_INT_BIT_RF_KILL;
4558                 if (iwm_check_rfkill(sc)) {
4559                         device_printf(sc->sc_dev,
4560                             "%s: rfkill switch, disabling interface\n",
4561                             __func__);
4562                         iwm_stop(sc);
4563                 }
4564         }
4565
4566         /*
4567          * The Linux driver uses periodic interrupts to avoid races.
4568          * We cargo-cult like it's going out of fashion.
4569          */
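             /*
              * (In the Linux driver the periodic interrupt is enabled after a
              * real FH_RX/SW_RX interrupt so that a buffer completed just
              * after closed_rb_num was sampled still raises an interrupt, and
              * it is disabled again when the periodic interrupt fires with no
              * RX work pending, as done below.)
              */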
4570         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
4571                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
4572                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
4573                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
4574                         IWM_WRITE_1(sc,
4575                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
4576                 isperiodic = 1;
4577         }
4578
4579         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
4580                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
4581                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
4582
4583                 iwm_notif_intr(sc);
4584
4585                 /* enable periodic interrupt, see above */
4586                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
4587                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
4588                             IWM_CSR_INT_PERIODIC_ENA);
4589         }
4590
4591         if (__predict_false(r1 & ~handled))
4592                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4593                     "%s: unhandled interrupts: %x\n", __func__, r1);
4594         rv = 1;
4595
4596  out_ena:
4597         iwm_restore_interrupts(sc);
4598  out:
4599         IWM_UNLOCK(sc);
4600         return;
4601 }
4602
4603 /*
4604  * Autoconf glue-sniffing
4605  */
4606 #define PCI_VENDOR_INTEL                0x8086
4607 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
4608 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
4609 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
4610 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
4611 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
4612 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
4613
4614 static const struct iwm_devices {
4615         uint16_t        device;
4616         const char      *name;
4617 } iwm_devices[] = {
4618         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4619         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4620         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4621         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4622         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4623         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
4624 };
4625
4626 static int
4627 iwm_probe(device_t dev)
4628 {
4629         int i;
4630
4631         for (i = 0; i < nitems(iwm_devices); i++) {
4632                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4633                     pci_get_device(dev) == iwm_devices[i].device) {
4634                         device_set_desc(dev, iwm_devices[i].name);
4635                         return (BUS_PROBE_DEFAULT);
4636                 }
4637         }
4638
4639         return (ENXIO);
4640 }
4641
4642 static int
4643 iwm_dev_check(device_t dev)
4644 {
4645         struct iwm_softc *sc;
4646
4647         sc = device_get_softc(dev);
4648
4649         switch (pci_get_device(dev)) {
4650         case PCI_PRODUCT_INTEL_WL_3160_1:
4651         case PCI_PRODUCT_INTEL_WL_3160_2:
4652                 sc->sc_fwname = "iwm3160fw";
4653                 sc->host_interrupt_operation_mode = 1;
4654                 return (0);
4655         case PCI_PRODUCT_INTEL_WL_7260_1:
4656         case PCI_PRODUCT_INTEL_WL_7260_2:
4657                 sc->sc_fwname = "iwm7260fw";
4658                 sc->host_interrupt_operation_mode = 1;
4659                 return (0);
4660         case PCI_PRODUCT_INTEL_WL_7265_1:
4661         case PCI_PRODUCT_INTEL_WL_7265_2:
4662                 sc->sc_fwname = "iwm7265fw";
4663                 sc->host_interrupt_operation_mode = 0;
4664                 return (0);
4665         default:
4666                 device_printf(dev, "unknown adapter type\n");
4667                 return ENXIO;
4668         }
4669 }
4670
4671 static int
4672 iwm_pci_attach(device_t dev)
4673 {
4674         struct iwm_softc *sc;
4675         int count, error, rid;
4676         uint16_t reg;
4677 #if defined(__DragonFly__)
4678         int irq_flags;
4679 #endif
4680
4681         sc = device_get_softc(dev);
4682
4683         /* Clear device-specific "PCI retry timeout" register (41h). */
4684         reg = pci_read_config(dev, 0x40, sizeof(reg));
4685         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
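             /*
              * (Same workaround as in iwn(4) and the Linux driver; clearing
              * the retry timeout reportedly keeps PCI Tx retries from
              * interfering with C3 CPU state transitions.)
              */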
4686
4687         /* Enable bus-mastering and hardware bug workaround. */
4688         pci_enable_busmaster(dev);
4689         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4690         /* if !MSI */
4691         if (reg & PCIM_STATUS_INTxSTATE) {
4692                 reg &= ~PCIM_STATUS_INTxSTATE;
4693         }
4694         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4695
4696         rid = PCIR_BAR(0);
4697         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4698             RF_ACTIVE);
4699         if (sc->sc_mem == NULL) {
4700                 device_printf(sc->sc_dev, "can't map mem space\n");
4701                 return (ENXIO);
4702         }
4703         sc->sc_st = rman_get_bustag(sc->sc_mem);
4704         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4705
4706         /* Install interrupt handler. */
4707         count = 1;
4708         rid = 0;
4709 #if defined(__DragonFly__)
4710         pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
4711         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
4712 #else
4713         if (pci_alloc_msi(dev, &count) == 0)
4714                 rid = 1;
4715         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4716             (rid != 0 ? 0 : RF_SHAREABLE));
4717 #endif
4718         if (sc->sc_irq == NULL) {
4719                 device_printf(dev, "can't map interrupt\n");
4720                 return (ENXIO);
4721         }
4722 #if defined(__DragonFly__)
4723         error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
4724                                iwm_intr, sc, &sc->sc_ih,
4725                                &wlan_global_serializer);
4726 #else
4727         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4728             NULL, iwm_intr, sc, &sc->sc_ih);
4729 #endif
4730         if (sc->sc_ih == NULL) {
4731                 device_printf(dev, "can't establish interrupt\n");
4732 #if defined(__DragonFly__)
4733                 pci_release_msi(dev);
4734 #endif
4735                 return (ENXIO);
4736         }
4737         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4738
4739         return (0);
4740 }
4741
4742 static void
4743 iwm_pci_detach(device_t dev)
4744 {
4745         struct iwm_softc *sc = device_get_softc(dev);
4746
4747         if (sc->sc_irq != NULL) {
4748                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4749                 bus_release_resource(dev, SYS_RES_IRQ,
4750                     rman_get_rid(sc->sc_irq), sc->sc_irq);
4751                 pci_release_msi(dev);
4752 #if defined(__DragonFly__)
4753                 sc->sc_irq = NULL;
4754 #endif
4755         }
4756         if (sc->sc_mem != NULL) {
4757                 bus_release_resource(dev, SYS_RES_MEMORY,
4758                     rman_get_rid(sc->sc_mem), sc->sc_mem);
4759 #if defined(__DragonFly__)
4760                 sc->sc_mem = NULL;
4761 #endif
4762         }
4763 }
4764
4765
4766
4767 static int
4768 iwm_attach(device_t dev)
4769 {
4770         struct iwm_softc *sc = device_get_softc(dev);
4771         struct ieee80211com *ic = &sc->sc_ic;
4772         int error;
4773         int txq_i, i;
4774
4775         sc->sc_dev = dev;
4776         IWM_LOCK_INIT(sc);
4777         mbufq_init(&sc->sc_snd, ifqmaxlen);
4778 #if defined(__DragonFly__)
4779         callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
4780 #else
4781         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
4782 #endif
4783         callout_init(&sc->sc_led_blink_to);
4784         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
4785         sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
4786             taskqueue_thread_enqueue, &sc->sc_tq);
4787 #if defined(__DragonFly__)
4788         error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
4789                                         -1, "iwm_taskq");
4790 #else
4791         error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
4792 #endif
4793         if (error != 0) {
4794                 device_printf(dev, "can't start threads, error %d\n",
4795                     error);
4796                 goto fail;
4797         }
4798
4799         /* PCI attach */
4800         error = iwm_pci_attach(dev);
4801         if (error != 0)
4802                 goto fail;
4803
4804         sc->sc_wantresp = -1;
4805
4806         /* Check device type */
4807         error = iwm_dev_check(dev);
4808         if (error != 0)
4809                 goto fail;
4810
4811         sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
4812
4813         /*
4814          * We now start fiddling with the hardware
4815          */
4816         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
4817         if (iwm_prepare_card_hw(sc) != 0) {
4818                 device_printf(dev, "could not initialize hardware\n");
4819                 goto fail;
4820         }
4821
4822         /* Allocate DMA memory for firmware transfers. */
4823         if ((error = iwm_alloc_fwmem(sc)) != 0) {
4824                 device_printf(dev, "could not allocate memory for firmware\n");
4825                 goto fail;
4826         }
4827
4828         /* Allocate "Keep Warm" page. */
4829         if ((error = iwm_alloc_kw(sc)) != 0) {
4830                 device_printf(dev, "could not allocate keep warm page\n");
4831                 goto fail;
4832         }
4833
4834         /* We use ICT interrupts */
4835         if ((error = iwm_alloc_ict(sc)) != 0) {
4836                 device_printf(dev, "could not allocate ICT table\n");
4837                 goto fail;
4838         }
4839
4840         /* Allocate TX scheduler "rings". */
4841         if ((error = iwm_alloc_sched(sc)) != 0) {
4842                 device_printf(dev, "could not allocate TX scheduler rings\n");
4843                 goto fail;
4844         }
4845
4846         /* Allocate TX rings */
4847         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
4848                 if ((error = iwm_alloc_tx_ring(sc,
4849                     &sc->txq[txq_i], txq_i)) != 0) {
4850                         device_printf(dev,
4851                             "could not allocate TX ring %d\n",
4852                             txq_i);
4853                         goto fail;
4854                 }
4855         }
4856
4857         /* Allocate RX ring. */
4858         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
4859                 device_printf(dev, "could not allocate RX ring\n");
4860                 goto fail;
4861         }
4862
4863         /* Clear pending interrupts. */
4864         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
4865
4866         ic->ic_softc = sc;
4867         ic->ic_name = device_get_nameunit(sc->sc_dev);
4868         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
4869         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
4870
4871         /* Set device capabilities. */
4872         ic->ic_caps =
4873             IEEE80211_C_STA |
4874             IEEE80211_C_WPA |           /* WPA/RSN */
4875             IEEE80211_C_WME |
4876             IEEE80211_C_SHSLOT |        /* short slot time supported */
4877             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
4878 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
4879             ;
4880         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
4881                 sc->sc_phyctxt[i].id = i;
4882                 sc->sc_phyctxt[i].color = 0;
4883                 sc->sc_phyctxt[i].ref = 0;
4884                 sc->sc_phyctxt[i].channel = NULL;
4885         }
4886
4887         /* Max RSSI */
4888         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
4889         sc->sc_preinit_hook.ich_func = iwm_preinit;
4890         sc->sc_preinit_hook.ich_arg = sc;
4891         sc->sc_preinit_hook.ich_desc = "iwm";
4892         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
4893                 device_printf(dev, "config_intrhook_establish failed\n");
4894                 goto fail;
4895         }
4896
4897 #ifdef IWM_DEBUG
4898         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4899             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
4900             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
4901 #endif
4902
4903         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4904             "<-%s\n", __func__);
4905
4906         return 0;
4907
4908         /* Free allocated memory if something failed during attachment. */
4909 fail:
4910         iwm_detach_local(sc, 0);
4911
4912         return ENXIO;
4913 }
4914
4915 static int
4916 iwm_update_edca(struct ieee80211com *ic)
4917 {
4918         struct iwm_softc *sc = ic->ic_softc;
4919
4920         device_printf(sc->sc_dev, "%s: called\n", __func__);
4921         return (0);
4922 }
4923
4924 static void
4925 iwm_preinit(void *arg)
4926 {
4927         struct iwm_softc *sc = arg;
4928         device_t dev = sc->sc_dev;
4929         struct ieee80211com *ic = &sc->sc_ic;
4930         int error;
4931
4932         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4933             "->%s\n", __func__);
4934
4935         IWM_LOCK(sc);
4936         if ((error = iwm_start_hw(sc)) != 0) {
4937                 device_printf(dev, "could not initialize hardware\n");
4938                 IWM_UNLOCK(sc);
4939                 goto fail;
4940         }
4941
4942         error = iwm_run_init_mvm_ucode(sc, 1);
4943         iwm_stop_device(sc);
4944         if (error) {
4945                 IWM_UNLOCK(sc);
4946                 goto fail;
4947         }
4948         device_printf(dev,
4949             "revision 0x%x, firmware %d.%d (API ver. %d)\n",
4950             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
4951             IWM_UCODE_MAJOR(sc->sc_fwver),
4952             IWM_UCODE_MINOR(sc->sc_fwver),
4953             IWM_UCODE_API(sc->sc_fwver));
4954
4955         /* not all hardware can do 5GHz band */
4956         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
4957                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
4958                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
4959         IWM_UNLOCK(sc);
4960
4961         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
4962             ic->ic_channels);
4963
4964         /*
4965          * At this point we've committed - if we fail to do setup,
4966          * we now also have to tear down the net80211 state.
4967          */
4968         ieee80211_ifattach(ic);
4969         ic->ic_vap_create = iwm_vap_create;
4970         ic->ic_vap_delete = iwm_vap_delete;
4971         ic->ic_raw_xmit = iwm_raw_xmit;
4972         ic->ic_node_alloc = iwm_node_alloc;
4973         ic->ic_scan_start = iwm_scan_start;
4974         ic->ic_scan_end = iwm_scan_end;
4975         ic->ic_update_mcast = iwm_update_mcast;
4976         ic->ic_getradiocaps = iwm_init_channel_map;
4977         ic->ic_set_channel = iwm_set_channel;
4978         ic->ic_scan_curchan = iwm_scan_curchan;
4979         ic->ic_scan_mindwell = iwm_scan_mindwell;
4980         ic->ic_wme.wme_update = iwm_update_edca;
4981         ic->ic_parent = iwm_parent;
4982         ic->ic_transmit = iwm_transmit;
4983         iwm_radiotap_attach(sc);
4984         if (bootverbose)
4985                 ieee80211_announce(ic);
4986
4987         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4988             "<-%s\n", __func__);
4989         config_intrhook_disestablish(&sc->sc_preinit_hook);
4990
4991         return;
4992 fail:
4993         config_intrhook_disestablish(&sc->sc_preinit_hook);
4994         iwm_detach_local(sc, 0);
4995 }
4996
4997 /*
4998  * Attach the interface to 802.11 radiotap.
4999  */
5000 static void
5001 iwm_radiotap_attach(struct iwm_softc *sc)
5002 {
5003         struct ieee80211com *ic = &sc->sc_ic;
5004
5005         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5006             "->%s begin\n", __func__);
5007         ieee80211_radiotap_attach(ic,
5008             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5009                 IWM_TX_RADIOTAP_PRESENT,
5010             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5011                 IWM_RX_RADIOTAP_PRESENT);
5012         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5013             "->%s end\n", __func__);
5014 }
5015
5016 static struct ieee80211vap *
5017 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5018     enum ieee80211_opmode opmode, int flags,
5019     const uint8_t bssid[IEEE80211_ADDR_LEN],
5020     const uint8_t mac[IEEE80211_ADDR_LEN])
5021 {
5022         struct iwm_vap *ivp;
5023         struct ieee80211vap *vap;
5024
5025         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5026                 return NULL;
5027         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
5028         vap = &ivp->iv_vap;
5029         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
5030         vap->iv_bmissthreshold = 10;            /* override default */
5031         /* Override with driver methods. */
5032         ivp->iv_newstate = vap->iv_newstate;
5033         vap->iv_newstate = iwm_newstate;
5034
5035         ieee80211_ratectl_init(vap);
5036         /* Complete setup. */
5037         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
5038             mac);
5039         ic->ic_opmode = opmode;
5040
5041         return vap;
5042 }
5043
5044 static void
5045 iwm_vap_delete(struct ieee80211vap *vap)
5046 {
5047         struct iwm_vap *ivp = IWM_VAP(vap);
5048
5049         ieee80211_ratectl_deinit(vap);
5050         ieee80211_vap_detach(vap);
5051         kfree(ivp, M_80211_VAP);
5052 }
5053
5054 static void
5055 iwm_scan_start(struct ieee80211com *ic)
5056 {
5057         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5058         struct iwm_softc *sc = ic->ic_softc;
5059         int error;
5060
5061         if (sc->sc_scanband)
5062                 return;
5063         IWM_LOCK(sc);
5064         error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5065         if (error) {
5066                 device_printf(sc->sc_dev, "could not initiate scan\n");
5067                 IWM_UNLOCK(sc);
5068                 ieee80211_cancel_scan(vap);
5069         } else {
5070                 iwm_led_blink_start(sc);
5071                 IWM_UNLOCK(sc);
5072         }
5073 }
5074
5075 static void
5076 iwm_scan_end(struct ieee80211com *ic)
5077 {
5078         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5079         struct iwm_softc *sc = ic->ic_softc;
5080
5081         IWM_LOCK(sc);
5082         iwm_led_blink_stop(sc);
5083         if (vap->iv_state == IEEE80211_S_RUN)
5084                 iwm_mvm_led_enable(sc);
5085         IWM_UNLOCK(sc);
5086 }
5087
5088 static void
5089 iwm_update_mcast(struct ieee80211com *ic)
5090 {
5091 }
5092
5093 static void
5094 iwm_set_channel(struct ieee80211com *ic)
5095 {
5096 }
5097
5098 static void
5099 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5100 {
5101 }
5102
5103 static void
5104 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
5105 {
5106         return;
5107 }
5108
5109 void
5110 iwm_init_task(void *arg1)
5111 {
5112         struct iwm_softc *sc = arg1;
5113
5114         IWM_LOCK(sc);
5115         while (sc->sc_flags & IWM_FLAG_BUSY) {
5116 #if defined(__DragonFly__)
5117                 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5118 #else
5119                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5120 #endif
5121         }
5122         sc->sc_flags |= IWM_FLAG_BUSY;
5123         iwm_stop(sc);
5124         if (sc->sc_ic.ic_nrunning > 0)
5125                 iwm_init(sc);
5126         sc->sc_flags &= ~IWM_FLAG_BUSY;
5127         wakeup(&sc->sc_flags);
5128         IWM_UNLOCK(sc);
5129 }
5130
5131 static int
5132 iwm_resume(device_t dev)
5133 {
5134         struct iwm_softc *sc = device_get_softc(dev);
5135         int do_reinit = 0;
5136         uint16_t reg;
5137
5138         /* Clear device-specific "PCI retry timeout" register (41h). */
5139         reg = pci_read_config(dev, 0x40, sizeof(reg));
5140         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5141         iwm_init_task(device_get_softc(dev));
5142
5143         IWM_LOCK(sc);
5144         if (sc->sc_flags & IWM_FLAG_DORESUME) {
5145                 sc->sc_flags &= ~IWM_FLAG_DORESUME;
5146                 do_reinit = 1;
5147         }
5148         IWM_UNLOCK(sc);
5149
5150         if (do_reinit)
5151                 ieee80211_resume_all(&sc->sc_ic);
5152
5153         return 0;
5154 }
5155
5156 static int
5157 iwm_suspend(device_t dev)
5158 {
5159         int do_stop = 0;
5160         struct iwm_softc *sc = device_get_softc(dev);
5161
5162         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
5163
5164         ieee80211_suspend_all(&sc->sc_ic);
5165
5166         if (do_stop) {
5167                 IWM_LOCK(sc);
5168                 iwm_stop(sc);
5169                 sc->sc_flags |= IWM_FLAG_DORESUME;
5170                 IWM_UNLOCK(sc);
5171         }
5172
5173         return (0);
5174 }
5175
5176 static int
5177 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
5178 {
5179         struct iwm_fw_info *fw = &sc->sc_fw;
5180         device_t dev = sc->sc_dev;
5181         int i;
5182
5183         if (sc->sc_tq) {
5184 #if defined(__DragonFly__)
5185                 /* doesn't exist for DFly, DFly drains tasks on free */
5186 #else
5187                 taskqueue_drain_all(sc->sc_tq);
5188 #endif
5189                 taskqueue_free(sc->sc_tq);
5190 #if defined(__DragonFly__)
5191                 sc->sc_tq = NULL;
5192 #endif
5193         }
5194         callout_drain(&sc->sc_led_blink_to);
5195         callout_drain(&sc->sc_watchdog_to);
5196         iwm_stop_device(sc);
5197         if (do_net80211) {
5198                 ieee80211_ifdetach(&sc->sc_ic);
5199         }
5200
5201         /* Free descriptor rings */
5202         for (i = 0; i < nitems(sc->txq); i++)
5203                 iwm_free_tx_ring(sc, &sc->txq[i]);
5204
5205         /* Free firmware */
5206         if (fw->fw_fp != NULL)
5207                 iwm_fw_info_free(fw);
5208
5209         /* Free scheduler */
5210         iwm_free_sched(sc);
5211         if (sc->ict_dma.vaddr != NULL)
5212                 iwm_free_ict(sc);
5213         if (sc->kw_dma.vaddr != NULL)
5214                 iwm_free_kw(sc);
5215         if (sc->fw_dma.vaddr != NULL)
5216                 iwm_free_fwmem(sc);
5217
5218         /* Finished with the hardware - detach things */
5219         iwm_pci_detach(dev);
5220
5221         mbufq_drain(&sc->sc_snd);
5222         IWM_LOCK_DESTROY(sc);
5223
5224         return (0);
5225 }
5226
5227 static int
5228 iwm_detach(device_t dev)
5229 {
5230         struct iwm_softc *sc = device_get_softc(dev);
5231
5232         return (iwm_detach_local(sc, 1));
5233 }
5234
5235 static device_method_t iwm_pci_methods[] = {
5236         /* Device interface */
5237         DEVMETHOD(device_probe,         iwm_probe),
5238         DEVMETHOD(device_attach,        iwm_attach),
5239         DEVMETHOD(device_detach,        iwm_detach),
5240         DEVMETHOD(device_suspend,       iwm_suspend),
5241         DEVMETHOD(device_resume,        iwm_resume),
5242
5243         DEVMETHOD_END
5244 };
5245
5246 static driver_t iwm_pci_driver = {
5247         "iwm",
5248         iwm_pci_methods,
5249         sizeof (struct iwm_softc)
5250 };
5251
5252 static devclass_t iwm_devclass;
5253
5254 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
5255 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
5256 MODULE_DEPEND(iwm, pci, 1, 1, 1);
5257 MODULE_DEPEND(iwm, wlan, 1, 1, 1);