if_iwm - Get rid of some more superfluous bus_dmamap_sync calls in rx path.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 /*
106  *                              DragonFly work
107  *
108  * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109  *       changes to remove per-device network interface (DragonFly has not
110  *       caught up to that yet on the WLAN side).
111  *
112  * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113  *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
114  *                              specifications to M_INTWAIT.  We still don't
115  *                              understand why FreeBSD uses M_NOWAIT for
116  *                              critical must-not-fail kmalloc()s).
117  *      free -> kfree
118  *      printf -> kprintf
119  *      (bug fix) memset in iwm_reset_rx_ring.
120  *      (debug)   added several kprintf()s on error
121  *
122  *      header file paths (DFly allows localized path specifications).
123  *      minor header file differences.
124  *
125  * Comprehensive list of adjustments for DragonFly #ifdef'd:
126  *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
127  *      packet counters
128  *      msleep -> lksleep
129  *      mtx -> lk  (mtx functions -> lockmgr functions)
130  *      callout differences
131  *      taskqueue differences
132  *      MSI differences
133  *      bus_setup_intr() differences
134  *      minor PCI config register naming differences
135  */
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
138
139 #include <sys/param.h>
140 #include <sys/bus.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
150
151 #include <machine/endian.h>
152
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
155
156 #include <net/bpf.h>
157
158 #include <net/if.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
164
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
169
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
174
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_pcie_trans.h"
189 #include "if_iwm_led.h"
190 #include "if_iwm_fw.h"
191
/*
 * Channel numbers supported by pre-8000-series hardware: the 2.4 GHz
 * band followed by the 5 GHz channels.  Must fit in IWM_NUM_CHANNELS
 * (checked below).
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
202
/*
 * Channel numbers supported by 8000-series hardware; a superset of the
 * pre-8000 list with additional 5 GHz channels.  Must fit in
 * IWM_NUM_CHANNELS_8000 (checked below).
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

/* Both channel tables above start with 14 2.4 GHz entries. */
#define IWM_NUM_2GHZ_CHANNELS	14
/* NOTE(review): presumably masks the NVM hw-address count field — confirm. */
#define IWM_N_HW_ADDR_MASK	0xF
216
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* legacy rate in 500 kbit/s units (2 == 1 Mbit/s) */
	uint8_t plcp;	/* matching PLCP signal value used by the firmware */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* Table layout: indices 0-3 are the CCK rates, 4 and up are OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
243
/* One NVM section read from the device: a length-counted byte buffer. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* Timeouts (in ticks) for the ucode "alive" and calibration waits. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result captured while waiting for the ucode ALIVE notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid ALIVE was seen */
	uint32_t scd_base_addr;	/* scheduler base address reported by fw */
};
256
/*
 * Forward declarations for all file-local functions, grouped roughly by
 * subsystem (firmware parsing, DMA/ring setup, NVM, rx/tx paths, 802.11
 * glue, attach/detach).
 */
static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
					   enum iwm_ucode_type,
					   const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
/* DMA memory and rx/tx ring management. */
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
				  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
/* NVM reading and parsing. */
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
				   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
/* Firmware upload over PCIe. */
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_sects *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_sects *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
					      enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
/* Receive and transmit paths. */
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
				      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *sc,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
				   struct iwm_rx_data *);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
					 struct iwm_rx_packet *,
					 struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
				 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
				      uint32_t tfd_msk, uint32_t flags);
/* Station management and 802.11 state machine glue. */
static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
						struct iwm_mvm_add_sta_cmd_v7 *,
						int *);
static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
				       int);
static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
					   struct iwm_int_sta *,
					   const uint8_t *, uint16_t, uint16_t);
static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
			       const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
/* Attach/detach and net80211 callbacks. */
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
			       const char [IFNAMSIZ], int,
			       enum ieee80211_opmode, int,
			       const uint8_t [IEEE80211_ADDR_LEN],
			       const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
/* DragonFly-only tunable: allow disabling MSI interrupts at boot. */
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif
440
441 /*
442  * Firmware parser.
443  */
444
445 static int
446 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
447 {
448         const struct iwm_fw_cscheme_list *l = (const void *)data;
449
450         if (dlen < sizeof(*l) ||
451             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
452                 return EINVAL;
453
454         /* we don't actually store anything for now, always use s/w crypto */
455
456         return 0;
457 }
458
459 static int
460 iwm_firmware_store_section(struct iwm_softc *sc,
461     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
462 {
463         struct iwm_fw_sects *fws;
464         struct iwm_fw_desc *fwone;
465
466         if (type >= IWM_UCODE_TYPE_MAX)
467                 return EINVAL;
468         if (dlen < sizeof(uint32_t))
469                 return EINVAL;
470
471         fws = &sc->sc_fw.fw_sects[type];
472         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
473                 return EINVAL;
474
475         fwone = &fws->fw_sect[fws->fw_count];
476
477         /* first 32bit are device load offset */
478         memcpy(&fwone->offset, data, sizeof(uint32_t));
479
480         /* rest is data */
481         fwone->data = data + sizeof(uint32_t);
482         fwone->len = dlen - sizeof(uint32_t);
483
484         fws->fw_count++;
485
486         return 0;
487 }
488
/* Fallback scan-channel count used until the firmware reports its own. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* On-disk layout of a default-calibration TLV: ucode type + calib ctrl. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
495
496 static int
497 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
498 {
499         const struct iwm_tlv_calib_data *def_calib = data;
500         uint32_t ucode_type = le32toh(def_calib->ucode_type);
501
502         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
503                 device_printf(sc->sc_dev,
504                     "Wrong ucode_type %u for default "
505                     "calibration.\n", ucode_type);
506                 return EINVAL;
507         }
508
509         sc->sc_default_calib[ucode_type].flow_trigger =
510             def_calib->calib.flow_trigger;
511         sc->sc_default_calib[ucode_type].event_trigger =
512             def_calib->calib.event_trigger;
513
514         return 0;
515 }
516
517 static void
518 iwm_fw_info_free(struct iwm_fw_info *fw)
519 {
520         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
521         fw->fw_fp = NULL;
522         /* don't touch fw->fw_status */
523         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
524 }
525
526 static int
527 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
528 {
529         struct iwm_fw_info *fw = &sc->sc_fw;
530         const struct iwm_tlv_ucode_header *uhdr;
531         struct iwm_ucode_tlv tlv;
532         enum iwm_ucode_tlv_type tlv_type;
533         const struct firmware *fwp;
534         const uint8_t *data;
535         uint32_t usniffer_img;
536         uint32_t paging_mem_size;
537         int num_of_cpus;
538         int error = 0;
539         size_t len;
540
541         if (fw->fw_status == IWM_FW_STATUS_DONE &&
542             ucode_type != IWM_UCODE_INIT)
543                 return 0;
544
545         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
546 #if defined(__DragonFly__)
547                 lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
548 #else
549                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
550 #endif
551         }
552         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
553
554         if (fw->fw_fp != NULL)
555                 iwm_fw_info_free(fw);
556
557         /*
558          * Load firmware into driver memory.
559          * fw_fp will be set.
560          */
561         IWM_UNLOCK(sc);
562         fwp = firmware_get(sc->cfg->fw_name);
563         IWM_LOCK(sc);
564         if (fwp == NULL) {
565                 device_printf(sc->sc_dev,
566                     "could not read firmware %s (error %d)\n",
567                     sc->cfg->fw_name, error);
568                 goto out;
569         }
570         fw->fw_fp = fwp;
571
572         /* (Re-)Initialize default values. */
573         sc->sc_capaflags = 0;
574         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
575         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
576         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
577
578         /*
579          * Parse firmware contents
580          */
581
582         uhdr = (const void *)fw->fw_fp->data;
583         if (*(const uint32_t *)fw->fw_fp->data != 0
584             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
585                 device_printf(sc->sc_dev, "invalid firmware %s\n",
586                     sc->cfg->fw_name);
587                 error = EINVAL;
588                 goto out;
589         }
590
591         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
592             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
593             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
594             IWM_UCODE_API(le32toh(uhdr->ver)));
595         data = uhdr->data;
596         len = fw->fw_fp->datasize - sizeof(*uhdr);
597
598         while (len >= sizeof(tlv)) {
599                 size_t tlv_len;
600                 const void *tlv_data;
601
602                 memcpy(&tlv, data, sizeof(tlv));
603                 tlv_len = le32toh(tlv.length);
604                 tlv_type = le32toh(tlv.type);
605
606                 len -= sizeof(tlv);
607                 data += sizeof(tlv);
608                 tlv_data = data;
609
610                 if (len < tlv_len) {
611                         device_printf(sc->sc_dev,
612                             "firmware too short: %zu bytes\n",
613                             len);
614                         error = EINVAL;
615                         goto parse_out;
616                 }
617
618                 switch ((int)tlv_type) {
619                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
620                         if (tlv_len < sizeof(uint32_t)) {
621                                 device_printf(sc->sc_dev,
622                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
623                                     __func__,
624                                     (int) tlv_len);
625                                 error = EINVAL;
626                                 goto parse_out;
627                         }
628                         sc->sc_capa_max_probe_len
629                             = le32toh(*(const uint32_t *)tlv_data);
630                         /* limit it to something sensible */
631                         if (sc->sc_capa_max_probe_len >
632                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
633                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
634                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
635                                     "ridiculous\n", __func__);
636                                 error = EINVAL;
637                                 goto parse_out;
638                         }
639                         break;
640                 case IWM_UCODE_TLV_PAN:
641                         if (tlv_len) {
642                                 device_printf(sc->sc_dev,
643                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
644                                     __func__,
645                                     (int) tlv_len);
646                                 error = EINVAL;
647                                 goto parse_out;
648                         }
649                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
650                         break;
651                 case IWM_UCODE_TLV_FLAGS:
652                         if (tlv_len < sizeof(uint32_t)) {
653                                 device_printf(sc->sc_dev,
654                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
655                                     __func__,
656                                     (int) tlv_len);
657                                 error = EINVAL;
658                                 goto parse_out;
659                         }
660                         /*
661                          * Apparently there can be many flags, but Linux driver
662                          * parses only the first one, and so do we.
663                          *
664                          * XXX: why does this override IWM_UCODE_TLV_PAN?
665                          * Intentional or a bug?  Observations from
666                          * current firmware file:
667                          *  1) TLV_PAN is parsed first
668                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
669                          * ==> this resets TLV_PAN to itself... hnnnk
670                          */
671                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
672                         break;
673                 case IWM_UCODE_TLV_CSCHEME:
674                         if ((error = iwm_store_cscheme(sc,
675                             tlv_data, tlv_len)) != 0) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: iwm_store_cscheme(): returned %d\n",
678                                     __func__,
679                                     error);
680                                 goto parse_out;
681                         }
682                         break;
683                 case IWM_UCODE_TLV_NUM_OF_CPU:
684                         if (tlv_len != sizeof(uint32_t)) {
685                                 device_printf(sc->sc_dev,
686                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
687                                     __func__,
688                                     (int) tlv_len);
689                                 error = EINVAL;
690                                 goto parse_out;
691                         }
692                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
693                         if (num_of_cpus == 2) {
694                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
695                                         TRUE;
696                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
697                                         TRUE;
698                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
699                                         TRUE;
700                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
701                                 device_printf(sc->sc_dev,
702                                     "%s: Driver supports only 1 or 2 CPUs\n",
703                                     __func__);
704                                 error = EINVAL;
705                                 goto parse_out;
706                         }
707                         break;
708                 case IWM_UCODE_TLV_SEC_RT:
709                         if ((error = iwm_firmware_store_section(sc,
710                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
711                                 device_printf(sc->sc_dev,
712                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
713                                     __func__,
714                                     error);
715                                 goto parse_out;
716                         }
717                         break;
718                 case IWM_UCODE_TLV_SEC_INIT:
719                         if ((error = iwm_firmware_store_section(sc,
720                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
721                                 device_printf(sc->sc_dev,
722                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
723                                     __func__,
724                                     error);
725                                 goto parse_out;
726                         }
727                         break;
728                 case IWM_UCODE_TLV_SEC_WOWLAN:
729                         if ((error = iwm_firmware_store_section(sc,
730                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
731                                 device_printf(sc->sc_dev,
732                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
733                                     __func__,
734                                     error);
735                                 goto parse_out;
736                         }
737                         break;
738                 case IWM_UCODE_TLV_DEF_CALIB:
739                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
742                                     __func__,
743                                     (int) tlv_len,
744                                     (int) sizeof(struct iwm_tlv_calib_data));
745                                 error = EINVAL;
746                                 goto parse_out;
747                         }
748                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
749                                 device_printf(sc->sc_dev,
750                                     "%s: iwm_set_default_calib() failed: %d\n",
751                                     __func__,
752                                     error);
753                                 goto parse_out;
754                         }
755                         break;
756                 case IWM_UCODE_TLV_PHY_SKU:
757                         if (tlv_len != sizeof(uint32_t)) {
758                                 error = EINVAL;
759                                 device_printf(sc->sc_dev,
760                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
761                                     __func__,
762                                     (int) tlv_len);
763                                 goto parse_out;
764                         }
765                         sc->sc_fw.phy_config =
766                             le32toh(*(const uint32_t *)tlv_data);
767                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
768                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
769                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
770                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
771                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
772                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
773                         break;
774
775                 case IWM_UCODE_TLV_API_CHANGES_SET: {
776                         const struct iwm_ucode_api *api;
777                         if (tlv_len != sizeof(*api)) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         api = (const struct iwm_ucode_api *)tlv_data;
782                         /* Flags may exceed 32 bits in future firmware. */
783                         if (le32toh(api->api_index) > 0) {
784                                 device_printf(sc->sc_dev,
785                                     "unsupported API index %d\n",
786                                     le32toh(api->api_index));
787                                 goto parse_out;
788                         }
789                         sc->sc_ucode_api = le32toh(api->api_flags);
790                         break;
791                 }
792
793                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
794                         const struct iwm_ucode_capa *capa;
795                         int idx, i;
796                         if (tlv_len != sizeof(*capa)) {
797                                 error = EINVAL;
798                                 goto parse_out;
799                         }
800                         capa = (const struct iwm_ucode_capa *)tlv_data;
801                         idx = le32toh(capa->api_index);
802                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
803                                 device_printf(sc->sc_dev,
804                                     "unsupported API index %d\n", idx);
805                                 goto parse_out;
806                         }
807                         for (i = 0; i < 32; i++) {
808                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
809                                         continue;
810                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
811                         }
812                         break;
813                 }
814
815                 case 48: /* undocumented TLV */
816                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
817                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
818                         /* ignore, not used by current driver */
819                         break;
820
821                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
822                         if ((error = iwm_firmware_store_section(sc,
823                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
824                             tlv_len)) != 0)
825                                 goto parse_out;
826                         break;
827
828                 case IWM_UCODE_TLV_PAGING:
829                         if (tlv_len != sizeof(uint32_t)) {
830                                 error = EINVAL;
831                                 goto parse_out;
832                         }
833                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
834
835                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
836                             "%s: Paging: paging enabled (size = %u bytes)\n",
837                             __func__, paging_mem_size);
838                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
839                                 device_printf(sc->sc_dev,
840                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
841                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
842                                 error = EINVAL;
843                                 goto out;
844                         }
845                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
846                                 device_printf(sc->sc_dev,
847                                     "%s: Paging: image isn't multiple %u\n",
848                                     __func__, IWM_FW_PAGING_SIZE);
849                                 error = EINVAL;
850                                 goto out;
851                         }
852
853                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
854                             paging_mem_size;
855                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
856                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
857                             paging_mem_size;
858                         break;
859
860                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
861                         if (tlv_len != sizeof(uint32_t)) {
862                                 error = EINVAL;
863                                 goto parse_out;
864                         }
865                         sc->sc_capa_n_scan_channels =
866                           le32toh(*(const uint32_t *)tlv_data);
867                         break;
868
869                 case IWM_UCODE_TLV_FW_VERSION:
870                         if (tlv_len != sizeof(uint32_t) * 3) {
871                                 error = EINVAL;
872                                 goto parse_out;
873                         }
874                         ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
875                             "%d.%d.%d",
876                             le32toh(((const uint32_t *)tlv_data)[0]),
877                             le32toh(((const uint32_t *)tlv_data)[1]),
878                             le32toh(((const uint32_t *)tlv_data)[2]));
879                         break;
880
881                 case IWM_UCODE_TLV_FW_MEM_SEG:
882                         break;
883
884                 default:
885                         device_printf(sc->sc_dev,
886                             "%s: unknown firmware section %d, abort\n",
887                             __func__, tlv_type);
888                         error = EINVAL;
889                         goto parse_out;
890                 }
891
892                 len -= roundup(tlv_len, 4);
893                 data += roundup(tlv_len, 4);
894         }
895
896         KASSERT(error == 0, ("unhandled error"));
897
898  parse_out:
899         if (error) {
900                 device_printf(sc->sc_dev, "firmware parse error %d, "
901                     "section type %d\n", error, tlv_type);
902         }
903
904         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
905                 device_printf(sc->sc_dev,
906                     "device uses unsupported power ops\n");
907                 error = ENOTSUP;
908         }
909
910  out:
911         if (error) {
912                 fw->fw_status = IWM_FW_STATUS_NONE;
913                 if (fw->fw_fp != NULL)
914                         iwm_fw_info_free(fw);
915         } else
916                 fw->fw_status = IWM_FW_STATUS_DONE;
917         wakeup(&sc->sc_fw);
918
919         return error;
920 }
921
922 /*
923  * DMA resource routines
924  */
925
926 /* fwmem is used to load firmware onto the card */
927 static int
928 iwm_alloc_fwmem(struct iwm_softc *sc)
929 {
930         /* Must be aligned on a 16-byte boundary. */
931         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
932             IWM_FH_MEM_TB_MAX_LENGTH, 16);
933 }
934
935 /* tx scheduler rings.  not used? */
936 static int
937 iwm_alloc_sched(struct iwm_softc *sc)
938 {
939         /* TX scheduler rings must be aligned on a 1KB boundary. */
940         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
941             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
942 }
943
944 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
945 static int
946 iwm_alloc_kw(struct iwm_softc *sc)
947 {
948         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
949 }
950
951 /* interrupt cause table */
952 static int
953 iwm_alloc_ict(struct iwm_softc *sc)
954 {
955         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
956             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
957 }
958
/*
 * Allocate DMA resources for an RX ring: the 256-byte-aligned
 * descriptor array (one 32-bit entry per ring slot), the 16-byte
 * aligned status area, a DMA tag for receive buffers, a spare map
 * used by iwm_rx_addbuf(), and one loaded buffer per slot.
 *
 * On any failure, everything allocated so far is torn down through
 * iwm_free_rx_ring().  Returns 0 or a bus_dma/errno error code.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	/*
	 * NOTE(review): the DragonFly branch requests PAGE_SIZE buffer
	 * alignment while the other branch uses byte alignment — confirm
	 * this difference is intentional.
	 */
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1039
1040 static void
1041 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1042 {
1043         /* Reset the ring state */
1044         ring->cur = 0;
1045
1046         /*
1047          * The hw rx ring index in shared memory must also be cleared,
1048          * otherwise the discrepancy can cause reprocessing chaos.
1049          */
1050         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1051 }
1052
/*
 * Release all DMA resources owned by an RX ring: descriptor and status
 * areas, every per-slot mbuf/map pair, the spare map, and finally the
 * buffer DMA tag.  The map and tag teardown steps are NULL-guarded, so
 * this is safe on a partially-constructed ring (it is the failure path
 * of iwm_alloc_rx_ring()).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Complete device DMA before the CPU frees the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1085
/*
 * Allocate DMA resources for TX ring 'qid': the 256-byte-aligned TFD
 * descriptor array (for every queue), plus — only for queues up to and
 * including the command queue — the device-command array and per-slot
 * DMA maps.  The command queue gets a larger single-segment mapping
 * because firmware commands may need more mapped space than packets.
 *
 * On failure the partially-built ring is released via
 * iwm_free_tx_ring().  Returns 0 or an errno/bus_dma error code.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Walk the command array, recording each slot's physical address;
	 * scratch_paddr points at the scratch field inside the tx_cmd that
	 * follows the command header.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity check: we must have walked exactly the command array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1175
/*
 * Drop all pending frames from a TX ring and return it to its empty
 * state without freeing its DMA resources.  Clears the descriptors,
 * this queue's bit in qfullmsk, and the ring indices; for the command
 * queue it also clears the command-in-flight state if one is held.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	/* Free any mbufs still attached to the ring slots. */
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Complete device DMA before freeing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1203
/*
 * Release all DMA resources owned by a TX ring: descriptor and command
 * arrays, every per-slot mbuf/map pair, and the buffer DMA tag.  The
 * map and tag teardown steps are NULL-guarded, so this is safe on a
 * partially-constructed ring (it is the failure path of
 * iwm_alloc_tx_ring()).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Complete device DMA before freeing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1232
1233 /*
1234  * High-level hardware frobbing routines
1235  */
1236
1237 static void
1238 iwm_enable_interrupts(struct iwm_softc *sc)
1239 {
1240         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1241         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1242 }
1243
/*
 * Re-program the interrupt mask from the cached sc_intmask, i.e. the
 * mask last set by iwm_enable_interrupts().
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1249
/*
 * Mask all interrupt sources, then acknowledge anything already
 * pending in both the CSR and flow-handler status registers so no
 * stale cause fires when interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1260
/*
 * Reset and (re)enable the in-memory interrupt cause table (ICT).
 * With interrupts held off, the table is zeroed, its physical address
 * is programmed into the DRAM interrupt-table register, ICT mode is
 * flagged in the softc, and interrupts are re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending, then re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1284
1285 /*
1286  * Since this .. hard-resets things, it's time to actually
1287  * mark the first vap (if any) as having no mac context.
1288  * It's annoying, but since the driver is potentially being
1289  * stop/start'ed whilst active (thanks openbsd port!) we
1290  * have to correctly track this.
1291  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	/* Deactivate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	/* Stop RX DMA before tearing down the rings. */
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain verions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1380
1381 static void
1382 iwm_mvm_nic_config(struct iwm_softc *sc)
1383 {
1384         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1385         uint32_t reg_val = 0;
1386         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1387
1388         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1389             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1390         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1391             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1392         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1393             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1394
1395         /* SKU control */
1396         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1397             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1398         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1399             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1400
1401         /* radio configuration */
1402         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1403         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1404         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1405
1406         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1407
1408         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1409             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1410             radio_cfg_step, radio_cfg_dash);
1411
1412         /*
1413          * W/A : NIC is stuck in a reset state after Early PCIe power off
1414          * (PCIe power is lost before PERST# is asserted), causing ME FW
1415          * to lose ownership and not being able to obtain it back.
1416          */
1417         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1418                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1419                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1420                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1421         }
1422 }
1423
/*
 * Program the hardware RX engine: clear the shared status area, stop
 * RX DMA, reset the channel-0 ring pointers, hand the hardware the
 * physical addresses of the RX descriptor ring and status area,
 * enable RX with 4KB buffers and interrupt coalescing, and publish
 * an initial write pointer of 8.
 *
 * Returns 0 on success, or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1485
/*
 * Program the TX DMA / scheduler base addresses.
 *
 * Deactivates the TX scheduler, points the hardware at the "keep warm"
 * page and at each TX ring's descriptor array, then re-enables the
 * scheduler in auto-active mode.
 *
 * Returns 0 on success or EBUSY if the NIC could not be locked for
 * register access.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        /* Let the scheduler activate queues on its own. */
        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1520
/*
 * One-shot NIC bring-up: APM init, power configuration (7000 family
 * only), MVM-specific NIC config, then the RX and TX DMA engines.
 * On success, shadow registers are enabled as the last step.
 *
 * Returns 0 on success or the first error from the RX/TX init helpers.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        /* Enable shadow registers (mask value taken from iwlwifi). */
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1547
/*
 * Access-category to TX FIFO mapping, indexed by access category.
 * NOTE(review): index 0 maps to the VO FIFO; confirm against the
 * caller's AC numbering (net80211 WME_AC_* order) before relying
 * on the exact ordering here.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};
1554
/*
 * Enable TX queue 'qid' and attach it to scheduler FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is configured directly via
 * periphery registers and SRAM writes; all other queues are configured
 * by sending an IWM_SCD_QUEUE_CFG command to the firmware ('sta_id' is
 * only used in that path).  The NIC lock is repeatedly dropped and
 * reacquired because some of the helpers called in between take it
 * themselves.
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be obtained,
 * or the error from the firmware command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer to slot 0. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Take the queue out of aggregation mode. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Reset the scheduler's read pointer for this queue. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the queue's scheduler context in SRAM. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Activate the queue and bind it to the target FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                iwm_nic_unlock(sc);

                /* Non-command queues are configured by the firmware. */
                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the raw qid value (not 1 << qid) into
         * IWM_SCD_EN_CTRL.  If that register is a per-queue enable
         * bitmap this looks wrong for qid > 1; verify against iwlwifi
         * before changing, since upstream code of this vintage matches.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1642
/*
 * Post-"alive" transport setup, modelled on iwlwifi's fw_alive path.
 *
 * Resets the ICT table, reads back the scheduler SRAM base (warning if
 * it disagrees with the address the firmware reported in the alive
 * response, 'scd_base_addr'), zeroes the scheduler context/status/
 * translation area, programs the scheduler DRAM base, enables the
 * command queue and all TX DMA channels, and finally re-enables
 * L1-Active on pre-8000 parts.
 *
 * Returns 0 on success or EBUSY on lock/SRAM-write failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        iwm_nic_unlock(sc);

        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                /* NOTE(review): the original error code is masked as EBUSY. */
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        /* Disable chain extension (we handle queue chaining ourselves). */
        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX scheduler FIFOs. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        return error;
}
1713
1714 /*
1715  * NVM read access and content parsing.  We do not support
1716  * external NVM or writing NVM.
1717  * iwlwifi/mvm/nvm.c
1718  */
1719
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of struct iwm_nvm_access_resp) */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1731
/*
 * Read one chunk of an NVM section from the firmware.
 *
 * Sends an IWM_NVM_ACCESS_CMD for 'length' bytes at 'offset' within
 * 'section' and, on success, copies the returned bytes to
 * data + offset and stores the count actually read in *len.
 *
 * A NOT_VALID_ADDRESS status at a non-zero offset is treated as a
 * normal end-of-section (the section was an exact multiple of the
 * chunk size), not as an error: *len is set to 0 and 0 is returned.
 *
 * Returns 0 on success, EIO/EINVAL on a bad response, or the error
 * from iwm_send_cmd().
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                /* SEND_IN_RFKILL: NVM must be readable even when RF-killed */
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* Sanity-check the response against what we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        /* Always release the response buffer held by IWM_CMD_WANT_SKB. */
        iwm_free_resp(sc, &cmd);
        return ret;
}
1818
1819 /*
1820  * Reads an NVM section completely.
1821  * NICs prior to 7000 family don't have a real NVM, but just read
1822  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1823  * by uCode, we need to manually check in this case that we don't
1824  * overflow and try to read more than the EEPROM size.
1825  * For 7000 family NICs, we supply the maximal size we can read, and
1826  * the uCode fills the response with as much data as we can,
1827  * without overflowing, so no check is needed.
1828  */
1829 static int
1830 iwm_nvm_read_section(struct iwm_softc *sc,
1831         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1832 {
1833         uint16_t seglen, length, offset = 0;
1834         int ret;
1835
1836         /* Set nvm section read length */
1837         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1838
1839         seglen = length;
1840
1841         /* Read the NVM until exhausted (reading less than requested) */
1842         while (seglen == length) {
1843                 /* Check no memory assumptions fail and cause an overflow */
1844                 if ((size_read + offset + length) >
1845                     sc->cfg->eeprom_size) {
1846                         device_printf(sc->sc_dev,
1847                             "EEPROM size is too small for NVM\n");
1848                         return ENOBUFS;
1849                 }
1850
1851                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1852                 if (ret) {
1853                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1854                                     "Cannot read NVM from section %d offset %d, length %d\n",
1855                                     section, offset, length);
1856                         return ret;
1857                 }
1858                 offset += seglen;
1859         }
1860
1861         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1862                     "NVM section %d read completed\n", section);
1863         *len = offset;
1864         return 0;
1865 }
1866
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        /* The following are relative to the start of the SW section. */
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1884
/* NVM offsets (in words) for the 8000 family layout. */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        /* Offset within the MAC-address-override section. */
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1910
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1918
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* 8000 family uses a different bit layout for the same fields. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* Fallback TX power limit (dBm) when the NVM provides none. */
#define DEFAULT_MAX_TX_POWER 16
1935
1936 /**
1937  * enum iwm_nvm_channel_flags - channel flags in NVM
1938  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1939  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1940  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1941  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1942  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1943  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1944  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1945  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1946  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1947  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1948  */
1949 enum iwm_nvm_channel_flags {
1950         IWM_NVM_CHANNEL_VALID = (1 << 0),
1951         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1952         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1953         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1954         IWM_NVM_CHANNEL_DFS = (1 << 7),
1955         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1956         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1957         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1958         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1959 };
1960
1961 /*
1962  * Translate EEPROM flags to net80211.
1963  */
1964 static uint32_t
1965 iwm_eeprom_channel_flags(uint16_t ch_flags)
1966 {
1967         uint32_t nflags;
1968
1969         nflags = 0;
1970         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1971                 nflags |= IEEE80211_CHAN_PASSIVE;
1972         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1973                 nflags |= IEEE80211_CHAN_NOADHOC;
1974         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1975                 nflags |= IEEE80211_CHAN_DFS;
1976                 /* Just in case. */
1977                 nflags |= IEEE80211_CHAN_NOADHOC;
1978         }
1979
1980         return (nflags);
1981 }
1982
/*
 * Register a range of NVM channels [ch_idx, ch_num) with net80211.
 *
 * For each valid channel the NVM flags are translated and the channel
 * is added via ieee80211_add_channel() using the modes listed in
 * 'bands'; invalid channels are skipped.  Stops early if the channel
 * array fills up (ieee80211_add_channel() returns non-zero).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* Channel-number tables differ per device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
2023
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * parsed NVM data.  Channels 1..13 are added as 11b/g, channel 14 as
 * 11b only, and the 5 GHz channels as 11a when the SKU enables the
 * 5 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = sc->nvm_data;
        uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        /* Last 2 GHz index (channel 14) is excluded here and done below. */
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* The channel table length depends on the device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2056
/*
 * Determine the MAC address on 8000-family devices.
 *
 * Preference order:
 *   1. The MAC-address-override (MAO) NVM section, if present and the
 *      address is neither the reserved placeholder nor otherwise
 *      invalid (broadcast, multicast, all-zero).
 *   2. The WFMP MAC address registers (read via PRPH), with the bytes
 *      reassembled from the two 32-bit register values.
 * If neither source yields an address, hw_addr is zeroed and a
 * diagnostic is printed; the caller rejects the all-zero address.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Placeholder written by tools when no real override exists. */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* Bytes come out of the registers in reversed order. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2115
2116 static int
2117 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2118             const uint16_t *phy_sku)
2119 {
2120         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2121                 return le16_to_cpup(nvm_sw + IWM_SKU);
2122
2123         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2124 }
2125
2126 static int
2127 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2128 {
2129         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2130                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2131         else
2132                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2133                                                 IWM_NVM_VERSION_8000));
2134 }
2135
2136 static int
2137 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2138                   const uint16_t *phy_sku)
2139 {
2140         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2141                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2142
2143         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2144 }
2145
2146 static int
2147 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2148 {
2149         int n_hw_addr;
2150
2151         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2152                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2153
2154         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2155
2156         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2157 }
2158
2159 static void
2160 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2161                   uint32_t radio_cfg)
2162 {
2163         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2164                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2165                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2166                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2167                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2168                 return;
2169         }
2170
2171         /* set the radio configuration for family 8000 */
2172         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2173         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2174         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2175         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2176         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2177         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2178 }
2179
/*
 * Fill data->hw_addr from the NVM.
 *
 * Pre-8000 parts store the address at IWM_HW_ADDR in the HW section in
 * little-endian 16-bit words (byte order 214365, undone here); the
 * 8000 family uses the MAO section / WFMP registers via
 * iwm_set_hw_address_family_8000().
 *
 * Returns 0 on success or EINVAL if no valid address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2210
/*
 * Parse the raw NVM section contents into a freshly allocated
 * iwm_nvm_data (sized for the family's channel count, with the
 * channel-flag table stored in the trailing flexible area).
 *
 * Returns the new structure, or NULL if no valid MAC address could
 * be derived.  The caller owns the result and frees it with
 * iwm_free_nvm_data().
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data;
        uint32_t sku, radio_cfg;

        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data = kmalloc(sizeof(*data) +
                    IWM_NUM_CHANNELS * sizeof(uint16_t),
                    M_DEVBUF, M_WAITOK | M_ZERO);
        } else {
                data = kmalloc(sizeof(*data) +
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
                    M_DEVBUF, M_WAITOK | M_ZERO);
        }
        /* NOTE(review): M_WAITOK kmalloc never returns NULL, so this
         * check is dead code; kept harmless. */
        if (!data)
                return NULL;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n deliberately disabled regardless of the SKU bit. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        /* If no valid mac address was found - bail out */
        if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
                kfree(data, M_DEVBUF);
                return NULL;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                /* 8000 family keeps channel flags in the REGULATORY section. */
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }

        return data;
}
2260
2261 static void
2262 iwm_free_nvm_data(struct iwm_nvm_data *data)
2263 {
2264         if (data != NULL)
2265                 kfree(data, M_DEVBUF);
2266 }
2267
2268 static struct iwm_nvm_data *
2269 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2270 {
2271         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2272
2273         /* Checking for required sections */
2274         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2275                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2276                     !sections[sc->cfg->nvm_hw_section_num].data) {
2277                         device_printf(sc->sc_dev,
2278                             "Can't parse empty OTP/NVM sections\n");
2279                         return NULL;
2280                 }
2281         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2282                 /* SW and REGULATORY sections are mandatory */
2283                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2284                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2285                         device_printf(sc->sc_dev,
2286                             "Can't parse empty OTP/NVM sections\n");
2287                         return NULL;
2288                 }
2289                 /* MAC_OVERRIDE or at least HW section must exist */
2290                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2291                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2292                         device_printf(sc->sc_dev,
2293                             "Can't parse mac_address, empty sections\n");
2294                         return NULL;
2295                 }
2296
2297                 /* PHY_SKU section is mandatory in B0 */
2298                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2299                         device_printf(sc->sc_dev,
2300                             "Can't parse phy_sku in B0, empty sections\n");
2301                         return NULL;
2302                 }
2303         } else {
2304                 panic("unknown device family %d\n", sc->cfg->device_family);
2305         }
2306
2307         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2308         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2309         calib = (const uint16_t *)
2310             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2311         regulatory = (const uint16_t *)
2312             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2313         mac_override = (const uint16_t *)
2314             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2315         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2316
2317         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2318             phy_sku, regulatory);
2319 }
2320
2321 static int
2322 iwm_nvm_init(struct iwm_softc *sc)
2323 {
2324         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2325         int i, ret, section;
2326         uint32_t size_read = 0;
2327         uint8_t *nvm_buffer, *temp;
2328         uint16_t len;
2329
2330         memset(nvm_sections, 0, sizeof(nvm_sections));
2331
2332         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2333                 return EINVAL;
2334
2335         /* load NVM values from nic */
2336         /* Read From FW NVM */
2337         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2338
2339         nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2340             M_INTWAIT | M_ZERO);
2341         if (!nvm_buffer)
2342                 return ENOMEM;
2343         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2344                 /* we override the constness for initial read */
2345                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2346                                            &len, size_read);
2347                 if (ret)
2348                         continue;
2349                 size_read += len;
2350                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2351                 if (!temp) {
2352                         ret = ENOMEM;
2353                         break;
2354                 }
2355                 memcpy(temp, nvm_buffer, len);
2356
2357                 nvm_sections[section].data = temp;
2358                 nvm_sections[section].length = len;
2359         }
2360         if (!size_read)
2361                 device_printf(sc->sc_dev, "OTP is blank\n");
2362         kfree(nvm_buffer, M_DEVBUF);
2363
2364         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2365         if (!sc->nvm_data)
2366                 return EINVAL;
2367         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2368                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2369
2370         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2371                 if (nvm_sections[i].data != NULL)
2372                         kfree(nvm_sections[i].data, M_DEVBUF);
2373         }
2374
2375         return 0;
2376 }
2377
/*
 * Copy one firmware section into device memory, one DMA-able chunk of
 * at most IWM_FH_MEM_TB_MAX_LENGTH bytes at a time.  Each chunk is
 * staged in the pre-allocated sc->fw_dma bounce buffer and transferred
 * by iwm_pcie_load_firmware_chunk().
 *
 * Returns 0 on success, or the error from the first failing chunk.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	/* Bounce buffer: CPU writes to v_addr, device DMAs from p_addr. */
	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* The last chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		/*
		 * Destinations inside the extended SRAM window need the
		 * extended-address bit set in the LMPM_CHICK register
		 * for the duration of the transfer.
		 */
		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk and make it visible to the device. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Always undo the extended-address bit, even on error. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2430
2431 /*
2432  * ucode
2433  */
/*
 * Program the device's service DMA channel (IWM_FH_SRVC_CHNL) to copy
 * byte_cnt bytes from host physical address phy_addr into device
 * address dst_addr, then wait for the transfer to finish.
 *
 * sc_fw_chunk_done is presumably set by the interrupt path on the
 * FH_TX completion interrupt (the handler is outside this view); we
 * sleep on &sc->sc_fw until then, giving up after 5 seconds.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if the chunk did not complete in time.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Cleared here, set by the FH_TX interrupt on completion. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while it is being (re)configured. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low bits of the host DMA address ... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ... high bits plus the transfer byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* Single TB, single TFD, marked valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the DMA; request an interrupt at end of TFD. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
#if defined(__DragonFly__)
		ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
#else
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
#endif
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2491
/*
 * Load all firmware sections belonging to one CPU on an 8000-family
 * device (secure boot flow), notifying the ucode after each section
 * via the IWM_FH_UCODE_LOAD_STATUS register.
 *
 * Loaded sections are accumulated as a bit pattern in sec_num; for
 * CPU2 the pattern is shifted into the upper 16 bits (shift_param).
 * Iteration stops at the CPU1/CPU2 or paging separator, and
 * *first_ucode_section is left at that index so a following CPU2 pass
 * resumes after it.  On exit, this CPU's half of the load-status
 * register is written to all-ones to signal completion.
 *
 * Returns 0 on success, or the error from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Extend the pattern by one bit per section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2553
/*
 * Load all firmware sections belonging to one CPU (non-secure boot
 * flow).  Iteration stops at the CPU1/CPU2 or paging separator, and
 * *first_ucode_section is left at that index so a following CPU2 pass
 * resumes after it.
 *
 * Returns 0 on success, or the error from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	/* CPU2's status bits live in the upper 16 bits (shift_param). */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	/*
	 * On 8000-family hardware, report this CPU's load phases as
	 * complete in the ucode load-status register.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2606
2607 static int
2608 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2609         const struct iwm_fw_sects *image)
2610 {
2611         int ret = 0;
2612         int first_ucode_section;
2613
2614         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2615                      image->is_dual_cpus ? "Dual" : "Single");
2616
2617         /* load to FW the binary non secured sections of CPU1 */
2618         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2619         if (ret)
2620                 return ret;
2621
2622         if (image->is_dual_cpus) {
2623                 /* set CPU2 header address */
2624                 iwm_write_prph(sc,
2625                                IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2626                                IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2627
2628                 /* load to FW the binary sections of CPU2 */
2629                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2630                                                  &first_ucode_section);
2631                 if (ret)
2632                         return ret;
2633         }
2634
2635         iwm_enable_interrupts(sc);
2636
2637         /* release CPU reset */
2638         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2639
2640         return 0;
2641 }
2642
2643 int
2644 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2645         const struct iwm_fw_sects *image)
2646 {
2647         int ret = 0;
2648         int first_ucode_section;
2649
2650         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2651                     image->is_dual_cpus ? "Dual" : "Single");
2652
2653         /* configure the ucode to be ready to get the secured image */
2654         /* release CPU reset */
2655         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2656
2657         /* load to FW the binary Secured sections of CPU1 */
2658         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2659             &first_ucode_section);
2660         if (ret)
2661                 return ret;
2662
2663         /* load to FW the binary sections of CPU2 */
2664         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2665             &first_ucode_section);
2666 }
2667
2668 /* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, the only one needed while firmware
 * chunks are being DMA'd to the device (see iwm_start_fw()).
 * sc_intmask mirrors the value written to the hardware mask register.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2676
2677 /* XXX Add proper rfkill support code */
/*
 * Prepare the hardware and download the given firmware image.
 *
 * Interrupts are narrowed to FH_TX for the duration of the load so
 * only the DMA-completion interrupt fires while chunks are in flight,
 * and the rfkill handshake bits are cleared around that.
 *
 * Returns 0 on success, EIO if the hardware is not ready (e.g. AMT
 * owns the device), or the error from iwm_nic_init()/the ucode load.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack/clear any pending interrupts before masking them. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2734
2735 static int
2736 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2737 {
2738         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2739                 .valid = htole32(valid_tx_ant),
2740         };
2741
2742         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2743             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2744 }
2745
/*
 * Send the PHY configuration and the calibration triggers for the
 * currently running ucode type (IWM_PHY_CONFIGURATION_CMD, sent
 * synchronously).
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	/* Calibration triggers differ per ucode image (INIT vs REGULAR). */
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2764
/*
 * Notification-wait callback for the IWM_MVM_ALIVE response.
 *
 * The three known response layouts (ver1/ver2/ver3) are distinguished
 * purely by payload size.  For each, this records the firmware's error
 * and log event table pointers, the scheduler (SCD) base address, and
 * whether the status word reported IWM_ALIVE_STATUS_OK into *data (a
 * struct iwm_mvm_alive_data).  Always returns TRUE so the wait ends
 * on the first ALIVE notification.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* ver1 has no UMAC error table. */
		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC table address means UMAC logs exist. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2841
2842 static int
2843 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2844         struct iwm_rx_packet *pkt, void *data)
2845 {
2846         struct iwm_phy_db *phy_db = data;
2847
2848         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2849                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2850                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2851                             __func__, pkt->hdr.code);
2852                 }
2853                 return TRUE;
2854         }
2855
2856         if (iwm_phy_db_set_section(phy_db, pkt)) {
2857                 device_printf(sc->sc_dev,
2858                     "%s: iwm_phy_db_set_section failed\n", __func__);
2859         }
2860
2861         return FALSE;
2862 }
2863
/*
 * Load the requested ucode image and block until the firmware's ALIVE
 * notification arrives (handled by iwm_alive_fn()).
 *
 * On success sc->cur_ucode is left at ucode_type and sc->ucode_loaded
 * is set; on any failure sc->cur_ucode is restored to its previous
 * value.  The softc lock is dropped while waiting for the
 * notification.
 *
 * Returns 0 on success, EIO if the firmware came up but reported an
 * invalid status, or an error from firmware read/start/paging.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, NELEM(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* The secure-boot status registers may explain a timeout. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2952
2953 /*
2954  * mvm misc bits
2955  */
2956
/*
 * Run the INIT ucode image.
 *
 * With justnvm set, only the NVM is read (to learn the MAC address)
 * and the function returns without triggering calibrations.
 * Otherwise BT coex, Smart FIFO, TX antenna and PHY configuration are
 * sent, and we wait (lock dropped) for the firmware's init-complete /
 * PHY DB notifications, collected by iwm_wait_phy_db_entry().
 *
 * Returns 0 on success, EPERM if rfkill is asserted, or the first
 * error encountered.  On early exit the notification waiter is
 * removed via the error label.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Register the waiter before the firmware can emit anything. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   NELEM(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Done: the error label also removes the waiter on success. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3049
3050 /*
3051  * receive side
3052  */
3053
/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	/*
	 * Allocate a fresh jumbo cluster for slot 'idx'.  Note that the
	 * 'size' parameter is unused; the buffer is always IWM_RBUF_SIZE.
	 * Failure is non-fatal: the slot keeps its old buffer (if any).
	 */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Map the new mbuf into the spare map first, so a mapping
	 * failure leaves the slot's current buffer untouched.
	 */
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
	    m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Release the DMA mapping of the buffer being replaced. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The device takes a 256-byte-aligned DMA address, stored >> 8. */
	KKASSERT((seg.ds_addr & 255) == 0);
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3105
3106 #define IWM_RSSI_OFFSET 50
3107 static int
3108 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3109 {
3110         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3111         uint32_t agc_a, agc_b;
3112         uint32_t val;
3113
3114         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3115         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3116         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3117
3118         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3119         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3120         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3121
3122         /*
3123          * dBm = rssi dB - agc dB - constant.
3124          * Higher AGC (higher radio gain) means lower signal.
3125          */
3126         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3127         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3128         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3129
3130         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3131             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3132             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3133
3134         return max_rssi_dbm;
3135 }
3136
3137 /*
3138  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3139  * values are reported by the fw as positive values - need to negate
3140  * to obtain their dBM.  Account for missing antennas by replacing 0
3141  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3142  */
3143 static int
3144 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3145 {
3146         int energy_a, energy_b, energy_c, max_energy;
3147         uint32_t val;
3148
3149         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3150         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3151             IWM_RX_INFO_ENERGY_ANT_A_POS;
3152         energy_a = energy_a ? -energy_a : -256;
3153         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3154             IWM_RX_INFO_ENERGY_ANT_B_POS;
3155         energy_b = energy_b ? -energy_b : -256;
3156         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3157             IWM_RX_INFO_ENERGY_ANT_C_POS;
3158         energy_c = energy_c ? -energy_c : -256;
3159         max_energy = MAX(energy_a, energy_b);
3160         max_energy = MAX(max_energy, energy_c);
3161
3162         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3163             "energy In A %d B %d C %d , and max %d\n",
3164             energy_a, energy_b, energy_c, max_energy);
3165
3166         return max_energy;
3167 }
3168
3169 static void
3170 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3171 {
3172         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3173
3174         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3175
3176         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3177 }
3178
3179 /*
3180  * Retrieve the average noise (in dBm) among receivers.
3181  */
3182 static int
3183 iwm_get_noise(struct iwm_softc *sc,
3184         const struct iwm_mvm_statistics_rx_non_phy *stats)
3185 {
3186         int i, total, nbant, noise;
3187
3188         total = nbant = noise = 0;
3189         for (i = 0; i < 3; i++) {
3190                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3191                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3192                     __func__, i, noise);
3193
3194                 if (noise) {
3195                         total += noise;
3196                         nbant++;
3197                 }
3198         }
3199
3200         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3201             __func__, nbant, total);
3202 #if 0
3203         /* There should be at least one antenna but check anyway. */
3204         return (nbant == 0) ? -127 : (total / nbant) - 107;
3205 #else
3206         /* For now, just hard-code it to -96 to be safe */
3207         return (-96);
3208 #endif
3209 }
3210
3211 /*
3212  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3213  *
3214  * Handles the actual data of the Rx packet from the fw
3215  */
3216 static void
3217 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3218         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3219 {
3220         struct ieee80211com *ic = &sc->sc_ic;
3221         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3222         struct ieee80211_frame *wh;
3223         struct ieee80211_node *ni;
3224         struct ieee80211_rx_stats rxs;
3225         struct mbuf *m;
3226         struct iwm_rx_phy_info *phy_info;
3227         struct iwm_rx_mpdu_res_start *rx_res;
3228         uint32_t len;
3229         uint32_t rx_pkt_status;
3230         int rssi;
3231
3232         phy_info = &sc->sc_last_phy_info;
3233         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3234         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3235         len = le16toh(rx_res->byte_count);
3236         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3237
3238         m = data->m;
3239         m->m_data = pkt->data + sizeof(*rx_res);
3240         m->m_pkthdr.len = m->m_len = len;
3241
3242         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3243                 device_printf(sc->sc_dev,
3244                     "dsp size out of range [0,20]: %d\n",
3245                     phy_info->cfg_phy_cnt);
3246                 return;
3247         }
3248
3249         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3250             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3251                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3252                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3253                 return; /* drop */
3254         }
3255
3256         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3257                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3258         } else {
3259                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3260         }
3261         /* Note: RSSI is absolute (ie a -ve value) */
3262         if (rssi < IWM_MIN_DBM)
3263                 rssi = IWM_MIN_DBM;
3264         else if (rssi > IWM_MAX_DBM)
3265                 rssi = IWM_MAX_DBM;
3266
3267         /* Map it to relative value */
3268         rssi = rssi - sc->sc_noise;
3269
3270         /* replenish ring for the buffer we're going to feed to the sharks */
3271         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3272                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3273                     __func__);
3274                 return;
3275         }
3276
3277         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3278             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3279
3280         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3281
3282         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3283             "%s: phy_info: channel=%d, flags=0x%08x\n",
3284             __func__,
3285             le16toh(phy_info->channel),
3286             le16toh(phy_info->phy_flags));
3287
3288         /*
3289          * Populate an RX state struct with the provided information.
3290          */
3291         bzero(&rxs, sizeof(rxs));
3292         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3293         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3294         rxs.c_ieee = le16toh(phy_info->channel);
3295         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3296                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3297         } else {
3298                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3299         }
3300         /* rssi is in 1/2db units */
3301         rxs.rssi = rssi * 2;
3302         rxs.nf = sc->sc_noise;
3303
3304         if (ieee80211_radiotap_active_vap(vap)) {
3305                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3306
3307                 tap->wr_flags = 0;
3308                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3309                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3310                 tap->wr_chan_freq = htole16(rxs.c_freq);
3311                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3312                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3313                 tap->wr_dbm_antsignal = (int8_t)rssi;
3314                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3315                 tap->wr_tsft = phy_info->system_timestamp;
3316                 switch (phy_info->rate) {
3317                 /* CCK rates. */
3318                 case  10: tap->wr_rate =   2; break;
3319                 case  20: tap->wr_rate =   4; break;
3320                 case  55: tap->wr_rate =  11; break;
3321                 case 110: tap->wr_rate =  22; break;
3322                 /* OFDM rates. */
3323                 case 0xd: tap->wr_rate =  12; break;
3324                 case 0xf: tap->wr_rate =  18; break;
3325                 case 0x5: tap->wr_rate =  24; break;
3326                 case 0x7: tap->wr_rate =  36; break;
3327                 case 0x9: tap->wr_rate =  48; break;
3328                 case 0xb: tap->wr_rate =  72; break;
3329                 case 0x1: tap->wr_rate =  96; break;
3330                 case 0x3: tap->wr_rate = 108; break;
3331                 /* Unknown rate: should not happen. */
3332                 default:  tap->wr_rate =   0;
3333                 }
3334         }
3335
3336         IWM_UNLOCK(sc);
3337         if (ni != NULL) {
3338                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3339                 ieee80211_input_mimo(ni, m, &rxs);
3340                 ieee80211_free_node(ni);
3341         } else {
3342                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3343                 ieee80211_input_mimo_all(ic, m, &rxs);
3344         }
3345         IWM_LOCK(sc);
3346 }
3347
3348 static int
3349 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3350         struct iwm_node *in)
3351 {
3352         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3353         struct ieee80211_node *ni = &in->in_ni;
3354         struct ieee80211vap *vap = ni->ni_vap;
3355         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3356         int failack = tx_resp->failure_frame;
3357
3358         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3359
3360         /* Update rate control statistics. */
3361         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3362             __func__,
3363             (int) le16toh(tx_resp->status.status),
3364             (int) le16toh(tx_resp->status.sequence),
3365             tx_resp->frame_count,
3366             tx_resp->bt_kill_count,
3367             tx_resp->failure_rts,
3368             tx_resp->failure_frame,
3369             le32toh(tx_resp->initial_rate),
3370             (int) le16toh(tx_resp->wireless_media_time));
3371
3372         if (status != IWM_TX_STATUS_SUCCESS &&
3373             status != IWM_TX_STATUS_DIRECT_DONE) {
3374                 ieee80211_ratectl_tx_complete(vap, ni,
3375                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3376                 return (1);
3377         } else {
3378                 ieee80211_ratectl_tx_complete(vap, ni,
3379                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3380                 return (0);
3381         }
3382 }
3383
/*
 * Handle a TX command completion: reclaim the DMA mapping and mbuf of
 * the completed slot, feed the result to net80211 and restart the
 * transmit path if the ring drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* The firmware responded, so the TX watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before completion hands m/ni back to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unthrottle transmission once below the low-water mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3423
3424 /*
3425  * transmit side
3426  */
3427
3428 /*
3429  * Process a "command done" firmware notification.  This is where we wakeup
3430  * processes waiting for a synchronous command completion.
3431  * from if_iwn
3432  */
3433 static void
3434 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3435 {
3436         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3437         struct iwm_tx_data *data;
3438
3439         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3440                 return; /* Not a command ack. */
3441         }
3442
3443         data = &ring->data[pkt->hdr.idx];
3444
3445         /* If the command was mapped in an mbuf, free it. */
3446         if (data->m != NULL) {
3447                 bus_dmamap_sync(ring->data_dmat, data->map,
3448                     BUS_DMASYNC_POSTWRITE);
3449                 bus_dmamap_unload(ring->data_dmat, data->map);
3450                 m_freem(data->m);
3451                 data->m = NULL;
3452         }
3453         wakeup(&ring->desc[pkt->hdr.idx]);
3454
3455         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3456                 device_printf(sc->sc_dev,
3457                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3458                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3459                 /* XXX call iwm_force_nmi() */
3460         }
3461
3462         KKASSERT(ring->queued > 0);
3463         ring->queued--;
3464         if (ring->queued == 0)
3465                 iwm_pcie_clear_cmd_in_flight(sc);
3466 }
3467
#if 0
/*
 * necessary only for block ack mode
 *
 * NOTE: this whole function is compiled out (#if 0); the call site in
 * iwm_tx() is likewise disabled.  It would write the per-frame byte
 * count into the TX scheduler's byte-count table.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3500
3501 /*
3502  * Take an 802.11 (non-n) rate, find the relevant rate
3503  * table entry.  return the index into in_ridx[].
3504  *
3505  * The caller then uses that index back into in_ridx
3506  * to figure out the rate index programmed /into/
3507  * the firmware for this given node.
3508  */
3509 static int
3510 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3511     uint8_t rate)
3512 {
3513         int i;
3514         uint8_t r;
3515
3516         for (i = 0; i < nitems(in->in_ridx); i++) {
3517                 r = iwm_rates[in->in_ridx[i]].rate;
3518                 if (rate == r)
3519                         return (i);
3520         }
3521         /* XXX Return the first */
3522         /* XXX TODO: have it return the /lowest/ */
3523         return (0);
3524 }
3525
3526 /*
3527  * Fill in the rate related information for a transmit command.
3528  */
3529 static const struct iwm_rate *
3530 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3531         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3532 {
3533         struct ieee80211com *ic = &sc->sc_ic;
3534         struct ieee80211_node *ni = &in->in_ni;
3535         const struct iwm_rate *rinfo;
3536         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3537         int ridx, rate_flags;
3538
3539         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3540         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3541
3542         /*
3543          * XXX TODO: everything about the rate selection here is terrible!
3544          */
3545
3546         if (type == IEEE80211_FC0_TYPE_DATA) {
3547                 int i;
3548                 /* for data frames, use RS table */
3549                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3550                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3551                 ridx = in->in_ridx[i];
3552
3553                 /* This is the index into the programmed table */
3554                 tx->initial_rate_index = i;
3555                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3556                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3557                     "%s: start with i=%d, txrate %d\n",
3558                     __func__, i, iwm_rates[ridx].rate);
3559         } else {
3560                 /*
3561                  * For non-data, use the lowest supported rate for the given
3562                  * operational mode.
3563                  *
3564                  * Note: there may not be any rate control information available.
3565                  * This driver currently assumes if we're transmitting data
3566                  * frames, use the rate control table.  Grr.
3567                  *
3568                  * XXX TODO: use the configured rate for the traffic type!
3569                  * XXX TODO: this should be per-vap, not curmode; as we later
3570                  * on we'll want to handle off-channel stuff (eg TDLS).
3571                  */
3572                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3573                         /*
3574                          * XXX this assumes the mode is either 11a or not 11a;
3575                          * definitely won't work for 11n.
3576                          */
3577                         ridx = IWM_RIDX_OFDM;
3578                 } else {
3579                         ridx = IWM_RIDX_CCK;
3580                 }
3581         }
3582
3583         rinfo = &iwm_rates[ridx];
3584
3585         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3586             __func__, ridx,
3587             rinfo->rate,
3588             !! (IWM_RIDX_IS_CCK(ridx))
3589             );
3590
3591         /* XXX TODO: hard-coded TX antenna? */
3592         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3593         if (IWM_RIDX_IS_CCK(ridx))
3594                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3595         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3596
3597         return rinfo;
3598 }
3599
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on queue 'ac'.
 *
 * Builds the iwm_tx_cmd (rate, station, flags, power-management
 * timeout), copies the 802.11 header into the command, DMA-maps the
 * payload and fills the TFD with (a) two buffers covering the command
 * + header and (b) one buffer per payload DMA segment, then advances
 * the ring write pointer.  Consumes 'm' on error.  Returns 0 on
 * success or an errno.  Called with the driver lock held (callers
 * take IWM_LOCK before calling -- see iwm_raw_xmit()).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
#if !defined(__DragonFly__)
	struct mbuf *m1;
#endif
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick rate and program rate_n_flags into the TX command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	/* Unicast frames expect an ACK from the peer. */
	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection above the RTS threshold. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go through the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Choose how long the firmware keeps the device awake after
	 * this frame, depending on the expected reply.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* (It travels inside the TX command; only the payload is mapped.) */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
					    segs, IWM_MAX_SCATTER - 2,
					    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		/* bus_dmamap_load_mbuf_defrag() already defragmented. */
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0 and TB1 cover the command header + TX command + 802.11
	 * header (+pad); the remaining TBs carry the payload segments. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3832
3833 static int
3834 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3835     const struct ieee80211_bpf_params *params)
3836 {
3837         struct ieee80211com *ic = ni->ni_ic;
3838         struct iwm_softc *sc = ic->ic_softc;
3839         int error = 0;
3840
3841         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3842             "->%s begin\n", __func__);
3843
3844         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3845                 m_freem(m);
3846                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3847                     "<-%s not RUNNING\n", __func__);
3848                 return (ENETDOWN);
3849         }
3850
3851         IWM_LOCK(sc);
3852         /* XXX fix this */
3853         if (params == NULL) {
3854                 error = iwm_tx(sc, m, ni, 0);
3855         } else {
3856                 error = iwm_tx(sc, m, ni, 0);
3857         }
3858         sc->sc_tx_timer = 5;
3859         IWM_UNLOCK(sc);
3860
3861         return (error);
3862 }
3863
3864 /*
3865  * mvm/tx.c
3866  */
3867
3868 /*
3869  * Note that there are transports that buffer frames before they reach
3870  * the firmware. This means that after flush_tx_path is called, the
3871  * queue might not be empty. The race-free way to handle this is to:
3872  * 1) set the station as draining
3873  * 2) flush the Tx path
3874  * 3) wait for the transport queues to be empty
3875  */
3876 static int
3877 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3878 {
3879         int ret;
3880         struct iwm_tx_path_flush_cmd flush_cmd = {
3881                 .queues_ctl = htole32(tfd_msk),
3882                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3883         };
3884
3885         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3886             sizeof(flush_cmd), &flush_cmd);
3887         if (ret)
3888                 device_printf(sc->sc_dev,
3889                     "Flushing tx queue failed: %d\n", ret);
3890         return ret;
3891 }
3892
3893 static int
3894 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3895         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3896 {
3897         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3898             cmd, status);
3899 }
3900
3901 /* send station add/update command to firmware */
3902 static int
3903 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3904 {
3905         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3906         int ret;
3907         uint32_t status;
3908
3909         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3910
3911         add_sta_cmd.sta_id = IWM_STATION_ID;
3912         add_sta_cmd.mac_id_n_color
3913             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3914                 IWM_DEFAULT_COLOR));
3915         if (!update) {
3916                 int ac;
3917                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3918                         add_sta_cmd.tfd_queue_msk |=
3919                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3920                 }
3921                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3922         }
3923         add_sta_cmd.add_modify = update ? 1 : 0;
3924         add_sta_cmd.station_flags_msk
3925             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3926         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3927         if (update)
3928                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3929
3930         status = IWM_ADD_STA_SUCCESS;
3931         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3932         if (ret)
3933                 return ret;
3934
3935         switch (status) {
3936         case IWM_ADD_STA_SUCCESS:
3937                 break;
3938         default:
3939                 ret = EIO;
3940                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3941                 break;
3942         }
3943
3944         return ret;
3945 }
3946
/* Create the firmware station entry for the BSS node. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3952
/* Update the already-existing firmware station entry for the BSS node. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3958
3959 static int
3960 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3961         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3962 {
3963         struct iwm_mvm_add_sta_cmd_v7 cmd;
3964         int ret;
3965         uint32_t status;
3966
3967         memset(&cmd, 0, sizeof(cmd));
3968         cmd.sta_id = sta->sta_id;
3969         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3970
3971         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3972         cmd.tid_disable_tx = htole16(0xffff);
3973
3974         if (addr)
3975                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3976
3977         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3978         if (ret)
3979                 return ret;
3980
3981         switch (status) {
3982         case IWM_ADD_STA_SUCCESS:
3983                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3984                     "%s: Internal station added.\n", __func__);
3985                 return 0;
3986         default:
3987                 device_printf(sc->sc_dev,
3988                     "%s: Add internal station failed, status=0x%x\n",
3989                     __func__, status);
3990                 ret = EIO;
3991                 break;
3992         }
3993         return ret;
3994 }
3995
3996 static int
3997 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3998 {
3999         int ret;
4000
4001         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4002         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4003
4004         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4005         if (ret)
4006                 return ret;
4007
4008         ret = iwm_mvm_add_int_sta_common(sc,
4009             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4010
4011         if (ret)
4012                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4013         return ret;
4014 }
4015
4016 static int
4017 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4018 {
4019         struct iwm_time_quota_cmd cmd;
4020         int i, idx, ret, num_active_macs, quota, quota_rem;
4021         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4022         int n_ifs[IWM_MAX_BINDINGS] = {0, };
4023         uint16_t id;
4024
4025         memset(&cmd, 0, sizeof(cmd));
4026
4027         /* currently, PHY ID == binding ID */
4028         if (in) {
4029                 id = in->in_phyctxt->id;
4030                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4031                 colors[id] = in->in_phyctxt->color;
4032
4033                 if (1)
4034                         n_ifs[id] = 1;
4035         }
4036
4037         /*
4038          * The FW's scheduling session consists of
4039          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4040          * equally between all the bindings that require quota
4041          */
4042         num_active_macs = 0;
4043         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4044                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4045                 num_active_macs += n_ifs[i];
4046         }
4047
4048         quota = 0;
4049         quota_rem = 0;
4050         if (num_active_macs) {
4051                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4052                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4053         }
4054
4055         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4056                 if (colors[i] < 0)
4057                         continue;
4058
4059                 cmd.quotas[idx].id_and_color =
4060                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4061
4062                 if (n_ifs[i] <= 0) {
4063                         cmd.quotas[idx].quota = htole32(0);
4064                         cmd.quotas[idx].max_duration = htole32(0);
4065                 } else {
4066                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4067                         cmd.quotas[idx].max_duration = htole32(0);
4068                 }
4069                 idx++;
4070         }
4071
4072         /* Give the remainder of the session to the first binding */
4073         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4074
4075         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4076             sizeof(cmd), &cmd);
4077         if (ret)
4078                 device_printf(sc->sc_dev,
4079                     "%s: Failed to send quota: %d\n", __func__, ret);
4080         return ret;
4081 }
4082
4083 /*
4084  * ieee80211 routines
4085  */
4086
4087 /*
4088  * Change to AUTH state in 80211 state machine.  Roughly matches what
4089  * Linux does in bss_info_changed().
4090  */
4091 static int
4092 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4093 {
4094         struct ieee80211_node *ni;
4095         struct iwm_node *in;
4096         struct iwm_vap *iv = IWM_VAP(vap);
4097         uint32_t duration;
4098         int error;
4099
4100         /*
4101          * XXX i have a feeling that the vap node is being
4102          * freed from underneath us. Grr.
4103          */
4104         ni = ieee80211_ref_node(vap->iv_bss);
4105         in = IWM_NODE(ni);
4106         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4107             "%s: called; vap=%p, bss ni=%p\n",
4108             __func__,
4109             vap,
4110             ni);
4111
4112         in->in_assoc = 0;
4113
4114         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4115         if (error != 0)
4116                 return error;
4117
4118         error = iwm_allow_mcast(vap, sc);
4119         if (error) {
4120                 device_printf(sc->sc_dev,
4121                     "%s: failed to set multicast\n", __func__);
4122                 goto out;
4123         }
4124
4125         /*
4126          * This is where it deviates from what Linux does.
4127          *
4128          * Linux iwlwifi doesn't reset the nic each time, nor does it
4129          * call ctxt_add() here.  Instead, it adds it during vap creation,
4130          * and always does a mac_ctx_changed().
4131          *
4132          * The openbsd port doesn't attempt to do that - it reset things
4133          * at odd states and does the add here.
4134          *
4135          * So, until the state handling is fixed (ie, we never reset
4136          * the NIC except for a firmware failure, which should drag
4137          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4138          * contexts that are required), let's do a dirty hack here.
4139          */
4140         if (iv->is_uploaded) {
4141                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4142                         device_printf(sc->sc_dev,
4143                             "%s: failed to update MAC\n", __func__);
4144                         goto out;
4145                 }
4146                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4147                     in->in_ni.ni_chan, 1, 1)) != 0) {
4148                         device_printf(sc->sc_dev,
4149                             "%s: failed update phy ctxt\n", __func__);
4150                         goto out;
4151                 }
4152                 in->in_phyctxt = &sc->sc_phyctxt[0];
4153
4154                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4155                         device_printf(sc->sc_dev,
4156                             "%s: binding update cmd\n", __func__);
4157                         goto out;
4158                 }
4159                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4160                         device_printf(sc->sc_dev,
4161                             "%s: failed to update sta\n", __func__);
4162                         goto out;
4163                 }
4164         } else {
4165                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4166                         device_printf(sc->sc_dev,
4167                             "%s: failed to add MAC\n", __func__);
4168                         goto out;
4169                 }
4170                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4171                     in->in_ni.ni_chan, 1, 1)) != 0) {
4172                         device_printf(sc->sc_dev,
4173                             "%s: failed add phy ctxt!\n", __func__);
4174                         error = ETIMEDOUT;
4175                         goto out;
4176                 }
4177                 in->in_phyctxt = &sc->sc_phyctxt[0];
4178
4179                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4180                         device_printf(sc->sc_dev,
4181                             "%s: binding add cmd\n", __func__);
4182                         goto out;
4183                 }
4184                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4185                         device_printf(sc->sc_dev,
4186                             "%s: failed to add sta\n", __func__);
4187                         goto out;
4188                 }
4189         }
4190
4191         /*
4192          * Prevent the FW from wandering off channel during association
4193          * by "protecting" the session with a time event.
4194          */
4195         /* XXX duration is in units of TU, not MS */
4196         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4197         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4198         DELAY(100);
4199
4200         error = 0;
4201 out:
4202         ieee80211_free_node(ni);
4203         return (error);
4204 }
4205
4206 static int
4207 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4208 {
4209         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4210         int error;
4211
4212         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4213                 device_printf(sc->sc_dev,
4214                     "%s: failed to update STA\n", __func__);
4215                 return error;
4216         }
4217
4218         in->in_assoc = 1;
4219         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4220                 device_printf(sc->sc_dev,
4221                     "%s: failed to update MAC\n", __func__);
4222                 return error;
4223         }
4224
4225         return 0;
4226 }
4227
/*
 * Tear down the RUN-state association by fully resetting the device.
 * The "in" argument may be NULL; when non-NULL its association flag is
 * cleared.  Always returns 0 (the live path; the #if 0 path below is
 * the reference implementation that currently hangs the hardware).
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drop any frames still queued in software before flushing HW. */
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: stop the device and bring the hardware back up. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Reference path, kept for when the freeze above is understood. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4298
4299 static struct ieee80211_node *
4300 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4301 {
4302         return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4303             M_INTWAIT | M_ZERO);
4304 }
4305
/*
 * Build the link-quality (rate-selection) command for the given node.
 *
 * Maps the node's negotiated legacy rate set onto the hardware rate
 * table (in->in_ridx), then fills in->in_lq with the per-retry rate
 * entries, highest rate first.  The result is sent to the firmware by
 * the caller via IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* Bail out on degenerate rate sets rather than over/under-fill. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* (nrates - 1) - i walks rs_rates from highest to lowest. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Unknown rate: leave in_ridx[i] at -1 from memset. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas across entries. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4421
4422 static int
4423 iwm_media_change(struct ifnet *ifp)
4424 {
4425         struct ieee80211vap *vap = ifp->if_softc;
4426         struct ieee80211com *ic = vap->iv_ic;
4427         struct iwm_softc *sc = ic->ic_softc;
4428         int error;
4429
4430         error = ieee80211_media_change(ifp);
4431         if (error != ENETRESET)
4432                 return error;
4433
4434         IWM_LOCK(sc);
4435         if (ic->ic_nrunning > 0) {
4436                 iwm_stop(sc);
4437                 iwm_init(sc);
4438         }
4439         IWM_UNLOCK(sc);
4440         return error;
4441 }
4442
4443
/*
 * net80211 state-machine transition hook.
 *
 * Runs with the 802.11 comlock held on entry/exit; internally it drops
 * that lock and takes the softc lock (and juggles both around calls
 * back into net80211), so the exact lock ordering below matters.
 * Leaving RUN forces the firmware through a full reset (see
 * iwm_release()) and may rewrite nstate to SCAN.  Always chains to the
 * saved net80211 newstate handler at the end.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Leaving SCAN: stop the LED blink callout. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Chain to net80211 first, then reset the device;
			 * note the lock swap around the callback.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filter, quotas, and rate table for RUN. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the link-quality table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4581
4582 void
4583 iwm_endscan_cb(void *arg, int pending)
4584 {
4585         struct iwm_softc *sc = arg;
4586         struct ieee80211com *ic = &sc->sc_ic;
4587
4588         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4589             "%s: scan ended\n",
4590             __func__);
4591
4592         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4593 }
4594
4595 /*
4596  * Aging and idle timeouts for the different possible scenarios
4597  * in default configuration
4598  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* TX re(transmit) */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4622
4623 /*
4624  * Aging and idle timeouts for the different possible scenarios
4625  * in single BSS MAC configuration.
4626  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* TX re(transmit) */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4650
/*
 * Fill a smart-fifo configuration command.
 *
 * Chooses RX watermarks and aging/idle timeouts based on whether we
 * are associated (ni != NULL) and on the AP's antenna capabilities;
 * long-delay timeouts always use the fixed aging timer.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermark selection awaits rxmcs support. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are a single constant for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: associated vs. default table. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4697
4698 static int
4699 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4700 {
4701         struct ieee80211com *ic = &sc->sc_ic;
4702         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4703         struct iwm_sf_cfg_cmd sf_cmd = {
4704                 .state = htole32(IWM_SF_FULL_ON),
4705         };
4706         int ret = 0;
4707
4708         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4709                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4710
4711         switch (new_state) {
4712         case IWM_SF_UNINIT:
4713         case IWM_SF_INIT_OFF:
4714                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4715                 break;
4716         case IWM_SF_FULL_ON:
4717                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4718                 break;
4719         default:
4720                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4721                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4722                           new_state);
4723                 return EINVAL;
4724         }
4725
4726         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4727                                    sizeof(sf_cmd), &sf_cmd);
4728         return ret;
4729 }
4730
4731 static int
4732 iwm_send_bt_init_conf(struct iwm_softc *sc)
4733 {
4734         struct iwm_bt_coex_cmd bt_cmd;
4735
4736         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4737         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4738
4739         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4740             &bt_cmd);
4741 }
4742
4743 static int
4744 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4745 {
4746         struct iwm_mcc_update_cmd mcc_cmd;
4747         struct iwm_host_cmd hcmd = {
4748                 .id = IWM_MCC_UPDATE_CMD,
4749                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4750                 .data = { &mcc_cmd },
4751         };
4752         int ret;
4753 #ifdef IWM_DEBUG
4754         struct iwm_rx_packet *pkt;
4755         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4756         struct iwm_mcc_update_resp *mcc_resp;
4757         int n_channels;
4758         uint16_t mcc;
4759 #endif
4760         int resp_v2 = isset(sc->sc_enabled_capa,
4761             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4762
4763         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4764         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4765         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4766             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4767                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4768         else
4769                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4770
4771         if (resp_v2)
4772                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4773         else
4774                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4775
4776         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4777             "send MCC update to FW with '%c%c' src = %d\n",
4778             alpha2[0], alpha2[1], mcc_cmd.source_id);
4779
4780         ret = iwm_send_cmd(sc, &hcmd);
4781         if (ret)
4782                 return ret;
4783
4784 #ifdef IWM_DEBUG
4785         pkt = hcmd.resp_pkt;
4786
4787         /* Extract MCC response */
4788         if (resp_v2) {
4789                 mcc_resp = (void *)pkt->data;
4790                 mcc = mcc_resp->mcc;
4791                 n_channels =  le32toh(mcc_resp->n_channels);
4792         } else {
4793                 mcc_resp_v1 = (void *)pkt->data;
4794                 mcc = mcc_resp_v1->mcc;
4795                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4796         }
4797
4798         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4799         if (mcc == 0)
4800                 mcc = 0x3030;  /* "00" - world */
4801
4802         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4803             "regulatory domain '%c%c' (%d channels available)\n",
4804             mcc >> 8, mcc & 0xff, n_channels);
4805 #endif
4806         iwm_free_resp(sc, &hcmd);
4807
4808         return 0;
4809 }
4810
4811 static void
4812 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4813 {
4814         struct iwm_host_cmd cmd = {
4815                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4816                 .len = { sizeof(uint32_t), },
4817                 .data = { &backoff, },
4818         };
4819
4820         if (iwm_send_cmd(sc, &cmd) != 0) {
4821                 device_printf(sc->sc_dev,
4822                     "failed to change thermal tx backoff\n");
4823         }
4824 }
4825
/*
 * Full hardware bring-up: run the INIT firmware image for calibration,
 * restart the device, load the regular firmware, and configure
 * stations, phy contexts, power, regulatory state and tx queues.
 * The step order mirrors the firmware's expected init flow; on any
 * failure after firmware load, the device is stopped again.
 * Returns 0 on success or an errno.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		kprintf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT image first to obtain calibration data. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" asks the firmware for its current/default regulatory MCC. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4933
4934 /* Allow multicast from our BSSID. */
4935 static int
4936 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4937 {
4938         struct ieee80211_node *ni = vap->iv_bss;
4939         struct iwm_mcast_filter_cmd *cmd;
4940         size_t size;
4941         int error;
4942
4943         size = roundup(sizeof(*cmd), 4);
4944         cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4945         if (cmd == NULL)
4946                 return ENOMEM;
4947         cmd->filter_own = 1;
4948         cmd->port_id = 0;
4949         cmd->count = 0;
4950         cmd->pass_all = 1;
4951         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4952
4953         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4954             IWM_CMD_SYNC, size, cmd);
4955         kfree(cmd, M_DEVBUF);
4956
4957         return (error);
4958 }
4959
4960 /*
4961  * ifnet interfaces
4962  */
4963
4964 static void
4965 iwm_init(struct iwm_softc *sc)
4966 {
4967         int error;
4968
4969         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4970                 return;
4971         }
4972         sc->sc_generation++;
4973         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4974
4975         if ((error = iwm_init_hw(sc)) != 0) {
4976                 kprintf("iwm_init_hw failed %d\n", error);
4977                 iwm_stop(sc);
4978                 return;
4979         }
4980
4981         /*
4982          * Ok, firmware loaded and we are jogging
4983          */
4984         sc->sc_flags |= IWM_FLAG_HW_INITED;
4985         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4986 }
4987
4988 static int
4989 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4990 {
4991         struct iwm_softc *sc;
4992         int error;
4993
4994         sc = ic->ic_softc;
4995
4996         IWM_LOCK(sc);
4997         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4998                 IWM_UNLOCK(sc);
4999                 return (ENXIO);
5000         }
5001         error = mbufq_enqueue(&sc->sc_snd, m);
5002         if (error) {
5003                 IWM_UNLOCK(sc);
5004                 return (error);
5005         }
5006         iwm_start(sc);
5007         IWM_UNLOCK(sc);
5008         return (0);
5009 }
5010
5011 /*
5012  * Dequeue packets from sendq and call send.
5013  */
5014 static void
5015 iwm_start(struct iwm_softc *sc)
5016 {
5017         struct ieee80211_node *ni;
5018         struct mbuf *m;
5019         int ac = 0;
5020
5021         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5022         while (sc->qfullmsk == 0 &&
5023                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5024                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5025                 if (iwm_tx(sc, m, ni, ac) != 0) {
5026                         if_inc_counter(ni->ni_vap->iv_ifp,
5027                             IFCOUNTER_OERRORS, 1);
5028                         ieee80211_free_node(ni);
5029                         continue;
5030                 }
5031                 sc->sc_tx_timer = 15;
5032         }
5033         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5034 }
5035
/*
 * Take the interface down: clear the inited flag, cancel LED blinking
 * and the tx watchdog, stop the device and mark any scan as finished.
 * The flag updates happen before iwm_stop_device() so concurrent paths
 * see the device as stopped.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5048
5049 static void
5050 iwm_watchdog(void *arg)
5051 {
5052         struct iwm_softc *sc = arg;
5053
5054         if (sc->sc_tx_timer > 0) {
5055                 if (--sc->sc_tx_timer == 0) {
5056                         device_printf(sc->sc_dev, "device timeout\n");
5057 #ifdef IWM_DEBUG
5058                         iwm_nic_error(sc);
5059 #endif
5060                         iwm_stop(sc);
5061 #if defined(__DragonFly__)
5062                         ++sc->sc_ic.ic_oerrors;
5063 #else
5064                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5065 #endif
5066                         return;
5067                 }
5068         }
5069         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5070 }
5071
5072 static void
5073 iwm_parent(struct ieee80211com *ic)
5074 {
5075         struct iwm_softc *sc = ic->ic_softc;
5076         int startall = 0;
5077
5078         IWM_LOCK(sc);
5079         if (ic->ic_nrunning > 0) {
5080                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5081                         iwm_init(sc);
5082                         startall = 1;
5083                 }
5084         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5085                 iwm_stop(sc);
5086         IWM_UNLOCK(sc);
5087         if (startall)
5088                 ieee80211_start_all(ic);
5089 }
5090
5091 /*
5092  * The interrupt side of things
5093  */
5094
5095 /*
5096  * error dumping routines are from iwlwifi/mvm/utils.c
5097  */
5098
5099 /*
5100  * Note: This structure is read from the device with IO accesses,
5101  * and the reading already does the endian conversion. As it is
5102  * read with uint32_t-sized accesses, any members with a different size
5103  * need to be ordered correctly though!
5104  */
/* Read out of device memory by iwm_nic_error() below. */
struct iwm_error_event_table {
	uint32_t valid;         /* (nonzero) valid, (0) log is empty */
	uint32_t error_id;              /* type of error */
	uint32_t trm_hw_status0;        /* TRM HW status */
	uint32_t trm_hw_status1;        /* TRM HW status */
	uint32_t blink2;                /* branch link */
	uint32_t ilink1;                /* interrupt link */
	uint32_t ilink2;                /* interrupt link */
	uint32_t data1;         /* error-specific data */
	uint32_t data2;         /* error-specific data */
	uint32_t data3;         /* error-specific data */
	uint32_t bcon_time;             /* beacon timer */
	uint32_t tsf_low;               /* network timestamp function timer */
	uint32_t tsf_hi;                /* network timestamp function timer */
	uint32_t gp1;           /* GP1 timer register */
	uint32_t gp2;           /* GP2 timer register */
	uint32_t fw_rev_type;   /* firmware revision type */
	uint32_t major;         /* uCode version major */
	uint32_t minor;         /* uCode version minor */
	uint32_t hw_ver;                /* HW Silicon version */
	uint32_t brd_ver;               /* HW board version */
	uint32_t log_pc;                /* log program counter */
	uint32_t frame_ptr;             /* frame pointer */
	uint32_t stack_ptr;             /* stack pointer */
	uint32_t hcmd;          /* last host command header */
	uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
	uint32_t wait_event;            /* wait event() caller address */
	uint32_t l2p_control;   /* L2pControlField */
	uint32_t l2p_duration;  /* L2pDurationField */
	uint32_t l2p_mhvalid;   /* L2pMhValidBits */
	uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;   /* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5152
5153 /*
5154  * UMAC error struct - relevant starting from family 8000 chip.
5155  * Note: This structure is read from the device with IO accesses,
5156  * and the reading already does the endian conversion. As it is
5157  * read with u32-sized accesses, any members with a different size
5158  * need to be ordered correctly though!
5159  */
/* Read out of device memory by iwm_nic_umac_error() below. */
struct iwm_umac_error_event_table {
	uint32_t valid;         /* (nonzero) valid, (0) log is empty */
	uint32_t error_id;      /* type of error */
	uint32_t blink1;        /* branch link */
	uint32_t blink2;        /* branch link */
	uint32_t ilink1;        /* interrupt link */
	uint32_t ilink2;        /* interrupt link */
	uint32_t data1;         /* error-specific data */
	uint32_t data2;         /* error-specific data */
	uint32_t data3;         /* error-specific data */
	uint32_t umac_major;
	uint32_t umac_minor;
	uint32_t frame_pointer; /* core register 27*/
	uint32_t stack_pointer; /* core register 28 */
	uint32_t cmd_header;    /* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;  /* ISR status register */
} __packed;
5177
/* Layout constants used when interpreting the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5180
5181 #ifdef IWM_DEBUG
/*
 * Firmware assert-code to symbolic-name map; the final entry is the
 * catch-all default used by iwm_desc_lookup().  Made static const:
 * the table is only used in this file and is never modified.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5203
5204 static const char *
5205 iwm_desc_lookup(uint32_t num)
5206 {
5207         int i;
5208
5209         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5210                 if (advanced_lookup[i].num == num)
5211                         return advanced_lookup[i].name;
5212
5213         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5214         return advanced_lookup[i].name;
5215 }
5216
/*
 * Dump the UMAC error log (relevant for family 8000+ firmware) to the
 * console.  The table is fetched from device memory at the address the
 * firmware advertised in sc->umac_error_event_table.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Addresses below 0x800000 cannot be a valid log pointer. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* NOTE(review): this comparison looks odd for an "is the log
	 * non-empty" check -- confirm against the iwlwifi original. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5263
5264 /*
5265  * Support for dumping the error log seemed like a good idea ...
5266  * but it's mostly hex junk and the only sensible thing is the
5267  * hw/ucode revision (which we know anyway).  Since it's here,
5268  * I'll just leave it in, just in case e.g. the Intel guys want to
5269  * help us decipher some "ADVANCED_SYSASSERT" later.
5270  */
/*
 * Dump the LMAC error log to the console, then the UMAC log if the
 * firmware advertised one.  See the comment above for why the output
 * is mostly raw hex.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Addresses below 0x800000 cannot be a valid log pointer. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* NOTE(review): this comparison looks odd for an "is the log
	 * non-empty" check -- confirm against the iwlwifi original. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5343 #endif
5344
/* Advance the rx ring consumer index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5346
5347 /*
5348  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5349  * Basic structure from if_iwn
5350  */
5351 static void
5352 iwm_notif_intr(struct iwm_softc *sc)
5353 {
5354         struct ieee80211com *ic = &sc->sc_ic;
5355         uint16_t hw;
5356
5357         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5358             BUS_DMASYNC_POSTREAD);
5359
5360         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5361
5362         /*
5363          * Process responses
5364          */
5365         while (sc->rxq.cur != hw) {
5366                 struct iwm_rx_ring *ring = &sc->rxq;
5367                 struct iwm_rx_data *data = &ring->data[ring->cur];
5368                 struct iwm_rx_packet *pkt;
5369                 struct iwm_cmd_response *cresp;
5370                 int qid, idx, code;
5371
5372                 bus_dmamap_sync(ring->data_dmat, data->map,
5373                     BUS_DMASYNC_POSTREAD);
5374                 pkt = mtod(data->m, struct iwm_rx_packet *);
5375
5376                 qid = pkt->hdr.qid & ~0x80;
5377                 idx = pkt->hdr.idx;
5378
5379                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5380                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5381                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5382                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5383
5384                 /*
5385                  * randomly get these from the firmware, no idea why.
5386                  * they at least seem harmless, so just ignore them for now
5387                  */
5388                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5389                     || pkt->len_n_flags == htole32(0x55550000))) {
5390                         ADVANCE_RXQ(sc);
5391                         continue;
5392                 }
5393
5394                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5395
5396                 switch (code) {
5397                 case IWM_REPLY_RX_PHY_CMD:
5398                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5399                         break;
5400
5401                 case IWM_REPLY_RX_MPDU_CMD:
5402                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5403                         break;
5404
5405                 case IWM_TX_CMD:
5406                         iwm_mvm_rx_tx_cmd(sc, pkt);
5407                         break;
5408
5409                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5410                         struct iwm_missed_beacons_notif *resp;
5411                         int missed;
5412
5413                         /* XXX look at mac_id to determine interface ID */
5414                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5415
5416                         resp = (void *)pkt->data;
5417                         missed = le32toh(resp->consec_missed_beacons);
5418
5419                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5420                             "%s: MISSED_BEACON: mac_id=%d, "
5421                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5422                             "num_rx=%d\n",
5423                             __func__,
5424                             le32toh(resp->mac_id),
5425                             le32toh(resp->consec_missed_beacons_since_last_rx),
5426                             le32toh(resp->consec_missed_beacons),
5427                             le32toh(resp->num_expected_beacons),
5428                             le32toh(resp->num_recvd_beacons));
5429
5430                         /* Be paranoid */
5431                         if (vap == NULL)
5432                                 break;
5433
5434                         /* XXX no net80211 locking? */
5435                         if (vap->iv_state == IEEE80211_S_RUN &&
5436                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5437                                 if (missed > vap->iv_bmissthreshold) {
5438                                         /* XXX bad locking; turn into task */
5439                                         IWM_UNLOCK(sc);
5440                                         ieee80211_beacon_miss(ic);
5441                                         IWM_LOCK(sc);
5442                                 }
5443                         }
5444
5445                         break; }
5446
5447                 case IWM_MFUART_LOAD_NOTIFICATION:
5448                         break;
5449
5450                 case IWM_MVM_ALIVE:
5451                         break;
5452
5453                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5454                         break;
5455
5456                 case IWM_STATISTICS_NOTIFICATION: {
5457                         struct iwm_notif_statistics *stats;
5458                         stats = (void *)pkt->data;
5459                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5460                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5461                         break; }
5462
5463                 case IWM_NVM_ACCESS_CMD:
5464                 case IWM_MCC_UPDATE_CMD:
5465                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5466                                 memcpy(sc->sc_cmd_resp,
5467                                     pkt, sizeof(sc->sc_cmd_resp));
5468                         }
5469                         break;
5470
5471                 case IWM_MCC_CHUB_UPDATE_CMD: {
5472                         struct iwm_mcc_chub_notif *notif;
5473                         notif = (void *)pkt->data;
5474
5475                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5476                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5477                         sc->sc_fw_mcc[2] = '\0';
5478                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5479                             "fw source %d sent CC '%s'\n",
5480                             notif->source_id, sc->sc_fw_mcc);
5481                         break; }
5482
5483                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5484                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5485                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5486                         struct iwm_dts_measurement_notif_v1 *notif;
5487
5488                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5489                                 device_printf(sc->sc_dev,
5490                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5491                                 break;
5492                         }
5493                         notif = (void *)pkt->data;
5494                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5495                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5496                             notif->temp);
5497                         break;
5498                 }
5499
5500                 case IWM_PHY_CONFIGURATION_CMD:
5501                 case IWM_TX_ANT_CONFIGURATION_CMD:
5502                 case IWM_ADD_STA:
5503                 case IWM_MAC_CONTEXT_CMD:
5504                 case IWM_REPLY_SF_CFG_CMD:
5505                 case IWM_POWER_TABLE_CMD:
5506                 case IWM_PHY_CONTEXT_CMD:
5507                 case IWM_BINDING_CONTEXT_CMD:
5508                 case IWM_TIME_EVENT_CMD:
5509                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5510                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5511                 case IWM_SCAN_ABORT_UMAC:
5512                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5513                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5514                 case IWM_REPLY_BEACON_FILTERING_CMD:
5515                 case IWM_MAC_PM_POWER_TABLE:
5516                 case IWM_TIME_QUOTA_CMD:
5517                 case IWM_REMOVE_STA:
5518                 case IWM_TXPATH_FLUSH:
5519                 case IWM_LQ_CMD:
5520                 case IWM_FW_PAGING_BLOCK_CMD:
5521                 case IWM_BT_CONFIG:
5522                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5523                         cresp = (void *)pkt->data;
5524                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5525                                 memcpy(sc->sc_cmd_resp,
5526                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5527                         }
5528                         break;
5529
5530                 /* ignore */
5531                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5532                         break;
5533
5534                 case IWM_INIT_COMPLETE_NOTIF:
5535                         break;
5536
5537                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5538                         struct iwm_periodic_scan_complete *notif;
5539                         notif = (void *)pkt->data;
5540
5541                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5542                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5543                                 ieee80211_runtask(ic, &sc->sc_es_task);
5544                         }
5545                         break;
5546                 }
5547
5548                 case IWM_SCAN_ITERATION_COMPLETE: {
5549                         struct iwm_lmac_scan_complete_notif *notif;
5550                         notif = (void *)pkt->data;
5551                         break;
5552                 }
5553
5554                 case IWM_SCAN_COMPLETE_UMAC: {
5555                         struct iwm_umac_scan_complete *notif;
5556                         notif = (void *)pkt->data;
5557
5558                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5559                             "UMAC scan complete, status=0x%x\n",
5560                             notif->status);
5561
5562                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5563                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5564                                 ieee80211_runtask(ic, &sc->sc_es_task);
5565                         }
5566                         break;
5567                 }
5568
5569                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5570                         struct iwm_umac_scan_iter_complete_notif *notif;
5571                         notif = (void *)pkt->data;
5572
5573                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5574                             "complete, status=0x%x, %d channels scanned\n",
5575                             notif->status, notif->scanned_channels);
5576                         break;
5577                 }
5578
5579                 case IWM_REPLY_ERROR: {
5580                         struct iwm_error_resp *resp;
5581                         resp = (void *)pkt->data;
5582
5583                         device_printf(sc->sc_dev,
5584                             "firmware error 0x%x, cmd 0x%x\n",
5585                             le32toh(resp->error_type),
5586                             resp->cmd_id);
5587                         break; }
5588
5589                 case IWM_TIME_EVENT_NOTIFICATION: {
5590                         struct iwm_time_event_notif *notif;
5591                         notif = (void *)pkt->data;
5592
5593                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5594                             "TE notif status = 0x%x action = 0x%x\n",
5595                             notif->status, notif->action);
5596                         break; }
5597
5598                 case IWM_MCAST_FILTER_CMD:
5599                         break;
5600
5601                 case IWM_SCD_QUEUE_CFG: {
5602                         struct iwm_scd_txq_cfg_rsp *rsp;
5603                         rsp = (void *)pkt->data;
5604
5605                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5606                             "queue cfg token=0x%x sta_id=%d "
5607                             "tid=%d scd_queue=%d\n",
5608                             rsp->token, rsp->sta_id, rsp->tid,
5609                             rsp->scd_queue);
5610                         break;
5611                 }
5612
5613                 default:
5614                         device_printf(sc->sc_dev,
5615                             "frame %d/%d %x UNHANDLED (this should "
5616                             "not happen)\n", qid, idx,
5617                             pkt->len_n_flags);
5618                         break;
5619                 }
5620
5621                 /*
5622                  * Why test bit 0x80?  The Linux driver:
5623                  *
5624                  * There is one exception:  uCode sets bit 15 when it
5625                  * originates the response/notification, i.e. when the
5626                  * response/notification is not a direct response to a
5627                  * command sent by the driver.  For example, uCode issues
5628                  * IWM_REPLY_RX when it sends a received frame to the driver;
5629                  * it is not a direct response to any driver command.
5630                  *
5631                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5632                  * uses a slightly different format for pkt->hdr, and "qid"
5633                  * is actually the upper byte of a two-byte field.
5634                  */
5635                 if (!(pkt->hdr.qid & (1 << 7))) {
5636                         iwm_cmd_done(sc, pkt);
5637                 }
5638
5639                 ADVANCE_RXQ(sc);
5640         }
5641
5642         /*
5643          * Tell the firmware what we have processed.
5644          * Seems like the hardware gets upset unless we align
5645          * the write by 8??
5646          */
5647         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5648         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5649 }
5650
/*
 * Main interrupt handler.
 *
 * Decodes the pending interrupt causes either from the ICT (interrupt
 * cause table, a DMA'd ring the firmware writes) or directly from the
 * IWM_CSR_INT register, acknowledges them, and dispatches:
 *   - SW_ERR: dump driver state and restart all VAPs
 *   - HW_ERR: stop the device
 *   - FH_TX:  firmware load chunk completed; wake the loader
 *   - RF_KILL: possibly stop the device
 *   - RX / periodic: run iwm_notif_intr() to drain the RX ring
 * Runs with interrupts masked (IWM_CSR_INT_MASK = 0); they are restored
 * via iwm_restore_interrupts() on the out_ena path.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): rv is assigned below but never read — write-only. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

#if defined(__DragonFly__)
	/* Guard against a spurious interrupt after iwm_pci_detach(). */
	if (sc->sc_mem == NULL) {
		kprintf("iwm_intr: detached\n");
		return;
	}
#endif
	IWM_LOCK(sc);
	/* Disable further interrupts while we process this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * ICT entries are little-endian in DMA memory.
		 * NOTE(review): htole32() is used where le32toh() is meant;
		 * the two are the same byte-swap (or identity) operation,
		 * so the result is identical — confirm before "fixing".
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed slot so it isn't seen twice. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/*
		 * Expand the compressed ICT cause bits back into the
		 * IWM_CSR_INT bit layout (low byte stays, next byte
		 * shifts into the high half).
		 */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Ack the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			kprintf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake iwm_firmware_load_chunk()'s sleeper. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Only a periodic tick, no real RX: stop the periodic timer. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX/notification ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5811
5812 /*
5813  * Autoconf glue-sniffing
5814  */
5815 #define PCI_VENDOR_INTEL                0x8086
5816 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5817 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5818 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5819 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5820 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5821 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5822 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5823 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5824 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5825 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5826
5827 static const struct iwm_devices {
5828         uint16_t                device;
5829         const struct iwm_cfg    *cfg;
5830 } iwm_devices[] = {
5831         { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5832         { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5833         { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5834         { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5835         { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5836         { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5837         { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5838         { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5839         { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5840         { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5841 };
5842
5843 static int
5844 iwm_probe(device_t dev)
5845 {
5846         int i;
5847
5848         for (i = 0; i < nitems(iwm_devices); i++) {
5849                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5850                     pci_get_device(dev) == iwm_devices[i].device) {
5851                         device_set_desc(dev, iwm_devices[i].cfg->name);
5852                         return (BUS_PROBE_DEFAULT);
5853                 }
5854         }
5855
5856         return (ENXIO);
5857 }
5858
5859 static int
5860 iwm_dev_check(device_t dev)
5861 {
5862         struct iwm_softc *sc;
5863         uint16_t devid;
5864         int i;
5865
5866         sc = device_get_softc(dev);
5867
5868         devid = pci_get_device(dev);
5869         for (i = 0; i < NELEM(iwm_devices); i++) {
5870                 if (iwm_devices[i].device == devid) {
5871                         sc->cfg = iwm_devices[i].cfg;
5872                         return (0);
5873                 }
5874         }
5875         device_printf(dev, "unknown adapter type\n");
5876         return ENXIO;
5877 }
5878
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

/*
 * PCI-side attach: enable bus mastering, map BAR0 register space,
 * allocate an (MSI if possible) interrupt and install iwm_intr() as
 * the handler, and fetch the parent DMA tag.
 * Returns 0 on success or ENXIO; on failure, resources already
 * acquired are released later via iwm_pci_detach() from the caller's
 * fail path.
 */
static int
iwm_pci_attach(device_t dev)
{
	struct iwm_softc *sc;
	int count, error, rid;
	uint16_t reg;
#if defined(__DragonFly__)
	int irq_flags;
#endif

	sc = device_get_softc(dev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);

	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	/* if !MSI */
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	}
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));

	/* Map the device's register window (BAR 0). */
	rid = PCIR_BAR(0);
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
		return (ENXIO);
	}
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);

	/* Install interrupt handler. */
	count = 1;
	rid = 0;
#if defined(__DragonFly__)
	/* DragonFly: one MSI (if iwm_msi_enable) or legacy INTx. */
	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
#else
	if (pci_alloc_msi(dev, &count) == 0)
		rid = 1;
	/* rid 0 means legacy INTx, which must be shareable. */
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
#endif
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
			return (ENXIO);
	}
#if defined(__DragonFly__)
	/* Serialized by the global wlan serializer, not a per-driver lock. */
	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
			       iwm_intr, sc, &sc->sc_ih,
			       &wlan_global_serializer);
#else
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
#endif
	/*
	 * NOTE(review): 'error' is assigned but success is judged by
	 * sc_ih being non-NULL; presumably equivalent here — confirm
	 * against bus_setup_intr(9) semantics before relying on it.
	 */
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
#if defined(__DragonFly__)
		pci_release_msi(dev);
#endif
			return (ENXIO);
	}
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);

	return (0);
}
5952
5953 static void
5954 iwm_pci_detach(device_t dev)
5955 {
5956         struct iwm_softc *sc = device_get_softc(dev);
5957
5958         if (sc->sc_irq != NULL) {
5959                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5960                 bus_release_resource(dev, SYS_RES_IRQ,
5961                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5962                 pci_release_msi(dev);
5963 #if defined(__DragonFly__)
5964                 sc->sc_irq = NULL;
5965 #endif
5966         }
5967         if (sc->sc_mem != NULL) {
5968                 bus_release_resource(dev, SYS_RES_MEMORY,
5969                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5970 #if defined(__DragonFly__)
5971                 sc->sc_mem = NULL;
5972 #endif
5973         }
5974 }
5975
5976
5977
/*
 * Device attach: initialize locks, callouts and tasks, allocate all
 * DMA resources (firmware staging, keep-warm page, ICT table, TX
 * scheduler, TX/RX rings), detect the exact hardware revision, and
 * set up net80211 capabilities.  Firmware load and ieee80211_ifattach
 * are deferred to iwm_preinit() via a config intrhook, since they need
 * interrupts enabled.
 * Returns 0 on success; on any failure, tears everything down with
 * iwm_detach_local() and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
#if defined(__DragonFly__)
	/* Watchdog callout runs under the softc lock. */
	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
#else
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
#endif
	callout_init(&sc->sc_led_blink_to);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	/* Framework for waiting on specific firmware notifications. */
	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response is pending yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait (up to 25ms) for the MAC clock to come up. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Fold a detected C-step into the stored hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Start all PHY contexts unreferenced and without a channel. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Defer firmware load + net80211 attach until interrupts work;
	 * iwm_preinit() runs from this intrhook.
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	sc->sc_preinit_hook.ich_desc = "iwm";
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6179
6180 static int
6181 iwm_is_valid_ether_addr(uint8_t *addr)
6182 {
6183         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6184
6185         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6186                 return (FALSE);
6187
6188         return (TRUE);
6189 }
6190
6191 static int
6192 iwm_update_edca(struct ieee80211com *ic)
6193 {
6194         struct iwm_softc *sc = ic->ic_softc;
6195
6196         device_printf(sc->sc_dev, "%s: called\n", __func__);
6197         return (0);
6198 }
6199
/*
 * Deferred attach stage, run from the config intrhook established in
 * iwm_attach() once interrupts are available.  Starts the hardware,
 * runs the "init" firmware once to read NVM/calibration data, then
 * attaches to net80211 and wires up all the ic_* callbacks.
 * On failure, tears the driver down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init ucode just to pull NVM data and calibration
	 * results, then stop the device again; the run-time firmware
	 * is loaded later when the interface is brought up.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6270
6271 /*
6272  * Attach the interface to 802.11 radiotap.
6273  */
6274 static void
6275 iwm_radiotap_attach(struct iwm_softc *sc)
6276 {
6277         struct ieee80211com *ic = &sc->sc_ic;
6278
6279         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6280             "->%s begin\n", __func__);
6281         ieee80211_radiotap_attach(ic,
6282             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6283                 IWM_TX_RADIOTAP_PRESENT,
6284             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6285                 IWM_RX_RADIOTAP_PRESENT);
6286         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6287             "->%s end\n", __func__);
6288 }
6289
6290 static struct ieee80211vap *
6291 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6292     enum ieee80211_opmode opmode, int flags,
6293     const uint8_t bssid[IEEE80211_ADDR_LEN],
6294     const uint8_t mac[IEEE80211_ADDR_LEN])
6295 {
6296         struct iwm_vap *ivp;
6297         struct ieee80211vap *vap;
6298
6299         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6300                 return NULL;
6301         ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6302         vap = &ivp->iv_vap;
6303         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6304         vap->iv_bmissthreshold = 10;            /* override default */
6305         /* Override with driver methods. */
6306         ivp->iv_newstate = vap->iv_newstate;
6307         vap->iv_newstate = iwm_newstate;
6308
6309         ieee80211_ratectl_init(vap);
6310         /* Complete setup. */
6311         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6312             mac);
6313         ic->ic_opmode = opmode;
6314
6315         return vap;
6316 }
6317
6318 static void
6319 iwm_vap_delete(struct ieee80211vap *vap)
6320 {
6321         struct iwm_vap *ivp = IWM_VAP(vap);
6322
6323         ieee80211_ratectl_deinit(vap);
6324         ieee80211_vap_detach(vap);
6325         kfree(ivp, M_80211_VAP);
6326 }
6327
6328 static void
6329 iwm_scan_start(struct ieee80211com *ic)
6330 {
6331         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6332         struct iwm_softc *sc = ic->ic_softc;
6333         int error;
6334
6335         IWM_LOCK(sc);
6336         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6337                 /* This should not be possible */
6338                 device_printf(sc->sc_dev,
6339                     "%s: Previous scan not completed yet\n", __func__);
6340         }
6341         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6342                 error = iwm_mvm_umac_scan(sc);
6343         else
6344                 error = iwm_mvm_lmac_scan(sc);
6345         if (error != 0) {
6346                 device_printf(sc->sc_dev, "could not initiate scan\n");
6347                 IWM_UNLOCK(sc);
6348                 ieee80211_cancel_scan(vap);
6349         } else {
6350                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6351                 iwm_led_blink_start(sc);
6352                 IWM_UNLOCK(sc);
6353         }
6354 }
6355
6356 static void
6357 iwm_scan_end(struct ieee80211com *ic)
6358 {
6359         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6360         struct iwm_softc *sc = ic->ic_softc;
6361
6362         IWM_LOCK(sc);
6363         iwm_led_blink_stop(sc);
6364         if (vap->iv_state == IEEE80211_S_RUN)
6365                 iwm_mvm_led_enable(sc);
6366         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6367                 /*
6368                  * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6369                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6370                  * taskqueue.
6371                  */
6372                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6373                 iwm_mvm_scan_stop_wait(sc);
6374         }
6375         IWM_UNLOCK(sc);
6376
6377         /*
6378          * Make sure we don't race, if sc_es_task is still enqueued here.
6379          * This is to make sure that it won't call ieee80211_scan_done
6380          * when we have already started the next scan.
6381          */
6382         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6383 }
6384
/*
 * net80211 ic_update_mcast callback; intentional no-op — the driver
 * does not reprogram multicast filtering here.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6389
/*
 * net80211 ic_set_channel callback; intentional no-op, provided so
 * the callback vector installed in iwm_preinit() is fully populated.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6394
/*
 * net80211 ic_scan_curchan callback; intentional no-op — the firmware
 * drives per-channel dwell itself rather than net80211.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6399
/*
 * net80211 ic_scan_mindwell callback; intentional no-op.  The
 * superfluous bare "return;" was dropped so this stub matches the
 * sibling scan stubs.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6405
6406 void
6407 iwm_init_task(void *arg1)
6408 {
6409         struct iwm_softc *sc = arg1;
6410
6411         IWM_LOCK(sc);
6412         while (sc->sc_flags & IWM_FLAG_BUSY) {
6413 #if defined(__DragonFly__)
6414                 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6415 #else
6416                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6417 #endif
6418 }
6419         sc->sc_flags |= IWM_FLAG_BUSY;
6420         iwm_stop(sc);
6421         if (sc->sc_ic.ic_nrunning > 0)
6422                 iwm_init(sc);
6423         sc->sc_flags &= ~IWM_FLAG_BUSY;
6424         wakeup(&sc->sc_flags);
6425         IWM_UNLOCK(sc);
6426 }
6427
6428 static int
6429 iwm_resume(device_t dev)
6430 {
6431         struct iwm_softc *sc = device_get_softc(dev);
6432         int do_reinit = 0;
6433
6434         /*
6435          * We disable the RETRY_TIMEOUT register (0x41) to keep
6436          * PCI Tx retries from interfering with C3 CPU state.
6437          */
6438         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6439         iwm_init_task(device_get_softc(dev));
6440
6441         IWM_LOCK(sc);
6442         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6443                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6444                 do_reinit = 1;
6445         }
6446         IWM_UNLOCK(sc);
6447
6448         if (do_reinit)
6449                 ieee80211_resume_all(&sc->sc_ic);
6450
6451         return 0;
6452 }
6453
6454 static int
6455 iwm_suspend(device_t dev)
6456 {
6457         int do_stop = 0;
6458         struct iwm_softc *sc = device_get_softc(dev);
6459
6460         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6461
6462         ieee80211_suspend_all(&sc->sc_ic);
6463
6464         if (do_stop) {
6465                 IWM_LOCK(sc);
6466                 iwm_stop(sc);
6467                 sc->sc_flags |= IWM_FLAG_SCANNING;
6468                 IWM_UNLOCK(sc);
6469         }
6470
6471         return (0);
6472 }
6473
/*
 * Tear down everything iwm_attach()/iwm_preinit() set up.  Idempotent
 * (guarded by sc_attached).  @do_net80211 is 0 on the attach failure
 * paths, where ieee80211_ifattach() has not happened yet, and 1 from
 * the regular device_detach path.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;
	if (do_net80211) {
		/* Let a pending scan-end task finish before teardown. */
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
	}
	/* Stop timers before shutting the hardware down. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain queued mbufs, then destroy the softc lock last. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6529
6530 static int
6531 iwm_detach(device_t dev)
6532 {
6533         struct iwm_softc *sc = device_get_softc(dev);
6534
6535         return (iwm_detach_local(sc, 1));
6536 }
6537
/* Newbus device interface method table. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

/* Driver glue: name, method table, and per-instance softc size. */
static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);