if_iwm - Sync nvm parsing code with Linux iwlwifi.
sys/dev/netif/iwm/if_iwm.c
/*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * The driver version we are currently based on is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 *                              DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *       changes to remove per-device network interface (DragonFly has not
 *       caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *      malloc -> kmalloc       (in particular, changing improper M_NOWAIT
 *                              specifications to M_INTWAIT.  We still don't
 *                              understand why FreeBSD uses M_NOWAIT for
 *                              critical must-not-fail kmalloc()s).
 *      free -> kfree
 *      printf -> kprintf
 *      (bug fix) memset in iwm_reset_rx_ring.
 *      (debug)   added several kprintf()s on error
 *
 *      header file paths (DFly allows localized path specifications).
 *      minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *      (safety)  added register read-back serialization in iwm_reset_rx_ring().
 *      packet counters
 *      msleep -> lksleep
 *      mtx -> lk  (mtx functions -> lockmgr functions)
 *      callout differences
 *      taskqueue differences
 *      MSI differences
 *      bus_setup_intr() differences
 *      minor PCI config register naming differences
 */
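
/*
 * A representative example of the #ifdef'd sleep-call difference noted
 * above (this exact pattern appears in iwm_read_firmware() below):
 *
 *      #if defined(__DragonFly__)
 *              lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
 *      #else
 *              msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
 *      #endif
 */
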
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_debug.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"

const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
        96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
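
/*
 * The tables above hold IEEE channel numbers, not frequencies: in the
 * 2.4 GHz band channel 1 is centered at 2412 MHz and channel 14 at
 * 2484 MHz, while a 5 GHz channel N is centered at (5000 + 5 * N) MHz,
 * so channel 36 corresponds to 5180 MHz.
 */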

#define IWM_NUM_2GHZ_CHANNELS   14
#define IWM_N_HW_ADDR_MASK      0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
        uint8_t rate;
        uint8_t plcp;
} iwm_rates[] = {
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
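
/*
 * A note on units (not from the original sources): the 'rate' field in
 * iwm_rates[] is in 500 kb/s steps, the usual net80211 convention, so
 * { 2, IWM_RATE_1M_PLCP } is 1 Mb/s CCK and { 108, IWM_RATE_54M_PLCP }
 * is 54 Mb/s OFDM.  With IWM_RIDX_OFDM == 4, IWM_RIDX_IS_CCK() is true
 * exactly for the four CCK entries at the head of the table.
 */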

struct iwm_nvm_section {
        uint16_t length;
        uint8_t *data;
};

static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int      iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int      iwm_set_default_calib(struct iwm_softc *, const void *);
static void     iwm_fw_info_free(struct iwm_fw_info *);
static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
                                     bus_size_t, bus_size_t);
static void     iwm_dma_contig_free(struct iwm_dma_info *);
static int      iwm_alloc_fwmem(struct iwm_softc *);
static int      iwm_alloc_sched(struct iwm_softc *);
static int      iwm_alloc_kw(struct iwm_softc *);
static int      iwm_alloc_ict(struct iwm_softc *);
static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void     iwm_disable_rx_dma(struct iwm_softc *);
static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void     iwm_enable_interrupts(struct iwm_softc *);
static void     iwm_restore_interrupts(struct iwm_softc *);
static void     iwm_disable_interrupts(struct iwm_softc *);
static void     iwm_ict_reset(struct iwm_softc *);
static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void     iwm_stop_device(struct iwm_softc *);
static void     iwm_mvm_nic_config(struct iwm_softc *);
static int      iwm_nic_rx_init(struct iwm_softc *);
static int      iwm_nic_tx_init(struct iwm_softc *);
static int      iwm_nic_init(struct iwm_softc *);
static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
static int      iwm_post_alive(struct iwm_softc *);
static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
                                     uint16_t *, uint32_t);
static uint32_t iwm_eeprom_channel_flags(uint16_t);
static void     iwm_add_channel_band(struct iwm_softc *,
                    struct ieee80211_channel[], int, int *, int, size_t,
                    const uint8_t[]);
static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
                    struct ieee80211_channel[]);
static struct iwm_nvm_data *
        iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
                           const uint16_t *, const uint16_t *,
                           const uint16_t *, const uint16_t *,
                           const uint16_t *);
static void     iwm_free_nvm_data(struct iwm_nvm_data *);
static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
                                               struct iwm_nvm_data *,
                                               const uint16_t *,
                                               const uint16_t *);
static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
                            const uint16_t *);
static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
                                  const uint16_t *);
static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
                                   const uint16_t *);
static void     iwm_set_radio_cfg(const struct iwm_softc *,
                                  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
        iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int      iwm_nvm_init(struct iwm_softc *);
static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
                                       const uint8_t *, uint32_t);
static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
                                        const uint8_t *, uint32_t);
static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
                                           struct iwm_fw_sects *, int, int *);
static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
                                            struct iwm_rx_phy_info *);
static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *,
                                      struct iwm_rx_data *);
static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
                                   struct iwm_rx_data *);
static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
                                         struct iwm_node *);
static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
                                  struct iwm_rx_data *);
static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
        iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
                        struct ieee80211_frame *, struct iwm_tx_cmd *);
static int      iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
                             const struct ieee80211_bpf_params *);
static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
                                                struct iwm_mvm_add_sta_cmd_v7 *,
                                                int *);
static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
                                       int);
static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
                                           struct iwm_int_sta *,
                                           const uint8_t *, uint16_t, uint16_t);
static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int      iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
                iwm_node_alloc(struct ieee80211vap *,
                               const uint8_t[IEEE80211_ADDR_LEN]);
static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int      iwm_media_change(struct ifnet *);
static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void     iwm_endscan_cb(void *, int);
static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
                                        struct iwm_sf_cfg_cmd *,
                                        struct ieee80211_node *);
static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int      iwm_send_bt_init_conf(struct iwm_softc *);
static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int      iwm_init_hw(struct iwm_softc *);
static void     iwm_init(struct iwm_softc *);
static void     iwm_start(struct iwm_softc *);
static void     iwm_stop(struct iwm_softc *);
static void     iwm_watchdog(void *);
static void     iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
                iwm_desc_lookup(uint32_t);
static void     iwm_nic_error(struct iwm_softc *);
static void     iwm_nic_umac_error(struct iwm_softc *);
#endif
static void     iwm_notif_intr(struct iwm_softc *);
static void     iwm_intr(void *);
static int      iwm_attach(device_t);
static int      iwm_is_valid_ether_addr(uint8_t *);
static void     iwm_preinit(void *);
static int      iwm_detach_local(struct iwm_softc *sc, int);
static void     iwm_init_task(void *);
static void     iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
                iwm_vap_create(struct ieee80211com *,
                               const char [IFNAMSIZ], int,
                               enum ieee80211_opmode, int,
                               const uint8_t [IEEE80211_ADDR_LEN],
                               const uint8_t [IEEE80211_ADDR_LEN]);
static void     iwm_vap_delete(struct ieee80211vap *);
static void     iwm_scan_start(struct ieee80211com *);
static void     iwm_scan_end(struct ieee80211com *);
static void     iwm_update_mcast(struct ieee80211com *);
static void     iwm_set_channel(struct ieee80211com *);
static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
static int      iwm_detach(device_t);

#if defined(__DragonFly__)
static int      iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
        const struct iwm_fw_cscheme_list *l = (const void *)data;

        if (dlen < sizeof(*l) ||
            dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
                return EINVAL;

        /* we don't actually store anything for now, always use s/w crypto */

        return 0;
}

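/*
 * Layout of a firmware section TLV payload, as implied by the parsing
 * in iwm_firmware_store_section() below (a sketch, not a normative
 * format description):
 *
 *      uint32_t fws_devoff;    device load offset (little-endian)
 *      uint8_t  data[];        dlen - sizeof(uint32_t) bytes of image
 */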
static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
        struct iwm_fw_sects *fws;
        struct iwm_fw_onesect *fwone;

        if (type >= IWM_UCODE_TYPE_MAX)
                return EINVAL;
        if (dlen < sizeof(uint32_t))
                return EINVAL;

        fws = &sc->sc_fw.fw_sects[type];
        if (fws->fw_count >= IWM_UCODE_SECT_MAX)
                return EINVAL;

        fwone = &fws->fw_sect[fws->fw_count];

        /* first 32bit are device load offset */
        memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

        /* rest is data */
        fwone->fws_data = data + sizeof(uint32_t);
        fwone->fws_len = dlen - sizeof(uint32_t);

        fws->fw_count++;
        fws->fw_totlen += fwone->fws_len;

        return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
        const struct iwm_tlv_calib_data *def_calib = data;
        uint32_t ucode_type = le32toh(def_calib->ucode_type);

        if (ucode_type >= IWM_UCODE_TYPE_MAX) {
                device_printf(sc->sc_dev,
                    "Wrong ucode_type %u for default "
                    "calibration.\n", ucode_type);
                return EINVAL;
        }

        sc->sc_default_calib[ucode_type].flow_trigger =
            def_calib->calib.flow_trigger;
        sc->sc_default_calib[ucode_type].event_trigger =
            def_calib->calib.event_trigger;

        return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
        firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
        fw->fw_fp = NULL;
        /* don't touch fw->fw_status */
        memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        const struct iwm_tlv_ucode_header *uhdr;
        struct iwm_ucode_tlv tlv;
        enum iwm_ucode_tlv_type tlv_type;
        const struct firmware *fwp;
        const uint8_t *data;
        int error = 0;
        size_t len;

        if (fw->fw_status == IWM_FW_STATUS_DONE &&
            ucode_type != IWM_UCODE_TYPE_INIT)
                return 0;

        while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
#if defined(__DragonFly__)
                lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
#else
                msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
#endif
        }
        fw->fw_status = IWM_FW_STATUS_INPROGRESS;

        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /*
         * Load firmware into driver memory.
         * fw_fp will be set.
         */
        IWM_UNLOCK(sc);
        fwp = firmware_get(sc->sc_fwname);
        IWM_LOCK(sc);
        if (fwp == NULL) {
                device_printf(sc->sc_dev,
                    "could not read firmware %s\n", sc->sc_fwname);
                error = ENOENT;
                goto out;
        }
        fw->fw_fp = fwp;

        /* (Re-)Initialize default values. */
        sc->sc_capaflags = 0;
        sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
        memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
        memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

        /*
         * Parse firmware contents
         */

        uhdr = (const void *)fw->fw_fp->data;
        if (*(const uint32_t *)fw->fw_fp->data != 0
            || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
                device_printf(sc->sc_dev, "invalid firmware %s\n",
                    sc->sc_fwname);
                error = EINVAL;
                goto out;
        }

        ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
            IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
            IWM_UCODE_MINOR(le32toh(uhdr->ver)),
            IWM_UCODE_API(le32toh(uhdr->ver)));
        data = uhdr->data;
        len = fw->fw_fp->datasize - sizeof(*uhdr);

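        /*
         * The remainder of the image is a stream of TLV records, each
         * shaped as below (little-endian, as consumed by this loop;
         * payloads are padded to a 4-byte boundary):
         *
         *      uint32_t type;          enum iwm_ucode_tlv_type
         *      uint32_t length;        payload length, excluding padding
         *      uint8_t  data[];        payload
         */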
        while (len >= sizeof(tlv)) {
                size_t tlv_len;
                const void *tlv_data;

                memcpy(&tlv, data, sizeof(tlv));
                tlv_len = le32toh(tlv.length);
                tlv_type = le32toh(tlv.type);

                len -= sizeof(tlv);
                data += sizeof(tlv);
                tlv_data = data;

                if (len < tlv_len) {
                        device_printf(sc->sc_dev,
                            "firmware too short: %zu bytes\n",
                            len);
                        error = EINVAL;
                        goto parse_out;
                }

                switch ((int)tlv_type) {
                case IWM_UCODE_TLV_PROBE_MAX_LEN:
                        if (tlv_len < sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        sc->sc_capa_max_probe_len
                            = le32toh(*(const uint32_t *)tlv_data);
                        /* limit it to something sensible */
                        if (sc->sc_capa_max_probe_len >
                            IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
                                IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                                    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
                                    "ridiculous\n", __func__);
                                error = EINVAL;
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_PAN:
                        if (tlv_len) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
                        break;
                case IWM_UCODE_TLV_FLAGS:
                        if (tlv_len < sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        /*
                         * Apparently there can be many flags, but Linux driver
                         * parses only the first one, and so do we.
                         *
                         * XXX: why does this override IWM_UCODE_TLV_PAN?
                         * Intentional or a bug?  Observations from
                         * current firmware file:
                         *  1) TLV_PAN is parsed first
                         *  2) TLV_FLAGS contains TLV_FLAGS_PAN
                         * ==> this resets TLV_PAN to itself... hnnnk
                         */
                        sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
                        break;
                case IWM_UCODE_TLV_CSCHEME:
                        if ((error = iwm_store_cscheme(sc,
                            tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: iwm_store_cscheme(): returned %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_NUM_OF_CPU: {
                        uint32_t num_cpu;
                        if (tlv_len != sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        num_cpu = le32toh(*(const uint32_t *)tlv_data);
                        if (num_cpu < 1 || num_cpu > 2) {
                                device_printf(sc->sc_dev,
                                    "%s: Driver supports only 1 or 2 CPUs\n",
                                    __func__);
                                error = EINVAL;
                                goto parse_out;
                        }
                        break;
                }
                case IWM_UCODE_TLV_SEC_RT:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_SEC_INIT:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_SEC_WOWLAN:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_DEF_CALIB:
                        if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
                                    __func__,
                                    (int) tlv_len,
                                    (int) sizeof(struct iwm_tlv_calib_data));
                                error = EINVAL;
                                goto parse_out;
                        }
                        if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: iwm_set_default_calib() failed: %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_PHY_SKU:
                        if (tlv_len != sizeof(uint32_t)) {
                                error = EINVAL;
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                goto parse_out;
                        }
                        sc->sc_fw_phy_config =
                            le32toh(*(const uint32_t *)tlv_data);
                        break;

                case IWM_UCODE_TLV_API_CHANGES_SET: {
                        const struct iwm_ucode_api *api;
                        if (tlv_len != sizeof(*api)) {
                                error = EINVAL;
                                goto parse_out;
                        }
                        api = (const struct iwm_ucode_api *)tlv_data;
                        /* Flags may exceed 32 bits in future firmware. */
                        if (le32toh(api->api_index) > 0) {
                                device_printf(sc->sc_dev,
                                    "unsupported API index %d\n",
                                    le32toh(api->api_index));
                                goto parse_out;
                        }
                        sc->sc_ucode_api = le32toh(api->api_flags);
                        break;
                }

                case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
                        const struct iwm_ucode_capa *capa;
                        int idx, i;
                        if (tlv_len != sizeof(*capa)) {
                                error = EINVAL;
                                goto parse_out;
                        }
                        capa = (const struct iwm_ucode_capa *)tlv_data;
                        idx = le32toh(capa->api_index);
                        if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
                                device_printf(sc->sc_dev,
                                    "unsupported API index %d\n", idx);
                                goto parse_out;
                        }
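                        /*
                         * Word idx of the capability bitmap covers
                         * capabilities 32*idx .. 32*idx+31: bit i set
                         * in api_capa enables capability 32*idx + i.
                         */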
                        for (i = 0; i < 32; i++) {
                                if ((le32toh(capa->api_capa) & (1U << i)) == 0)
                                        continue;
                                setbit(sc->sc_enabled_capa, i + (32 * idx));
                        }
                        break;
                }

                case 48: /* undocumented TLV */
                case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
                case IWM_UCODE_TLV_FW_GSCAN_CAPA:
                        /* ignore, not used by current driver */
                        break;

                case IWM_UCODE_TLV_SEC_RT_USNIFFER:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
                            tlv_len)) != 0)
                                goto parse_out;
                        break;

                case IWM_UCODE_TLV_N_SCAN_CHANNELS:
                        if (tlv_len != sizeof(uint32_t)) {
                                error = EINVAL;
                                goto parse_out;
                        }
                        sc->sc_capa_n_scan_channels =
                          le32toh(*(const uint32_t *)tlv_data);
                        break;

                case IWM_UCODE_TLV_FW_VERSION:
                        if (tlv_len != sizeof(uint32_t) * 3) {
                                error = EINVAL;
                                goto parse_out;
                        }
                        ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
                            "%d.%d.%d",
                            le32toh(((const uint32_t *)tlv_data)[0]),
                            le32toh(((const uint32_t *)tlv_data)[1]),
                            le32toh(((const uint32_t *)tlv_data)[2]));
                        break;

                default:
                        device_printf(sc->sc_dev,
                            "%s: unknown firmware section %d, abort\n",
                            __func__, tlv_type);
                        error = EINVAL;
                        goto parse_out;
                }

                len -= roundup(tlv_len, 4);
                data += roundup(tlv_len, 4);
        }

        KASSERT(error == 0, ("unhandled error"));

 parse_out:
        if (error) {
                device_printf(sc->sc_dev, "firmware parse error %d, "
                    "section type %d\n", error, tlv_type);
        }

        if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
                device_printf(sc->sc_dev,
                    "device uses unsupported power ops\n");
                error = ENOTSUP;
        }

 out:
        if (error) {
                fw->fw_status = IWM_FW_STATUS_NONE;
                if (fw->fw_fp != NULL)
                        iwm_fw_info_free(fw);
        } else
                fw->fw_status = IWM_FW_STATUS_DONE;
        wakeup(&sc->sc_fw);

        return error;
}

/*
 * DMA resource routines
 */

#if !defined(__DragonFly__)
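/*
 * busdma load callback (a brief note, not new behavior): the load in
 * iwm_dma_contig_alloc() is issued with BUS_DMA_NOWAIT, so this callback
 * runs synchronously from bus_dmamap_load() and simply records the
 * single segment's bus address through the opaque arg pointer.
 */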
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        if (error != 0)
                return;
        KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
        *(bus_addr_t *)arg = segs[0].ds_addr;
}
#endif

static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
        int error;

        dma->tag = NULL;
        dma->map = NULL;
        dma->size = size;
        dma->vaddr = NULL;

#if defined(__DragonFly__)
        bus_dmamem_t dmem;
        error = bus_dmamem_coherent(tag, alignment, 0,
                                    BUS_SPACE_MAXADDR_32BIT,
                                    BUS_SPACE_MAXADDR,
                                    size, BUS_DMA_NOWAIT, &dmem);
        if (error != 0)
                goto fail;

        dma->tag = dmem.dmem_tag;
        dma->map = dmem.dmem_map;
        dma->vaddr = dmem.dmem_addr;
        dma->paddr = dmem.dmem_busaddr;
#else
        error = bus_dma_tag_create(tag, alignment,
            0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, NULL, NULL, &dma->tag);
        if (error != 0)
                goto fail;

        error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
        if (error != 0)
                goto fail;

        error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
            iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
        if (error != 0) {
                bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
                dma->vaddr = NULL;
                goto fail;
        }
#endif

        bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

        return 0;

fail:
        iwm_dma_contig_free(dma);

        return error;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
        if (dma->vaddr != NULL) {
                bus_dmamap_sync(dma->tag, dma->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(dma->tag, dma->map);
                bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
                dma->vaddr = NULL;
        }
        if (dma->tag != NULL) {
                bus_dma_tag_destroy(dma->tag);
                dma->tag = NULL;
        }
}

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
        /* Must be aligned on a 16-byte boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
            sc->sc_fwdmasegsz, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
        /* TX scheduler rings must be aligned on a 1KB boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
            nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
            IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /* Allocate RX descriptors (256-byte aligned). */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
                                   0,
                                   BUS_SPACE_MAXADDR_32BIT,
                                   BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
                                   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
        /* XXX conditional nic locks are stupid */
        /* XXX print out if we can't lock the NIC? */
        if (iwm_nic_lock(sc)) {
                /* XXX handle if RX stop doesn't finish? */
                (void) iwm_pcie_rx_stop(sc);
                iwm_nic_unlock(sc);
        }
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        /* Reset the ring state */
        ring->cur = 0;

        /*
         * The hw rx ring index in shared memory must also be cleared,
         * otherwise the discrepancy can cause reprocessing chaos.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        int i;

        iwm_dma_contig_free(&ring->desc_dma);
        iwm_dma_contig_free(&ring->stat_dma);

        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
                if (data->map != NULL) {
                        bus_dmamap_destroy(ring->data_dmat, data->map);
                        data->map = NULL;
                }
        }
        if (ring->spare_map != NULL) {
                bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
                ring->spare_map = NULL;
        }
        if (ring->data_dmat != NULL) {
                bus_dma_tag_destroy(ring->data_dmat);
                ring->data_dmat = NULL;
        }
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                nsegments = IWM_MAX_SCATTER - 2;
        }

#if defined(__DragonFly__)
        error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
                                   0,
                                   BUS_SPACE_MAXADDR_32BIT,
                                   BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   maxsize, nsegments, maxsize,
                                   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

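        /*
         * Per-slot command layout, as computed below: slot i owns one
         * struct iwm_device_cmd inside cmd_dma, so
         *
         *      cmd_paddr     = cmd_dma.paddr + i * sizeof(struct iwm_device_cmd)
         *      scratch_paddr = cmd_paddr + sizeof(struct iwm_cmd_header)
         *                      + offsetof(struct iwm_tx_cmd, scratch)
         */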
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        /* Clear TX descriptors. */
        memset(ring->desc, 0, ring->desc_dma.size);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);
        sc->qfullmsk &= ~(1 << ring->qid);
        ring->queued = 0;
        ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        iwm_dma_contig_free(&ring->desc_dma);
        iwm_dma_contig_free(&ring->cmd_dma);

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
                if (data->map != NULL) {
                        bus_dmamap_destroy(ring->data_dmat, data->map);
                        data->map = NULL;
                }
        }
        if (ring->data_dmat != NULL) {
                bus_dma_tag_destroy(ring->data_dmat);
                ring->data_dmat = NULL;
        }
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
        sc->sc_intmask = IWM_CSR_INI_SET_MASK;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}

1315 /*
1316  * Since this hard-resets the device, it's also time to mark
1317  * the first vap (if any) as having no MAC context.  It's
1318  * annoying, but since the driver may be stopped and restarted
1319  * while active (thanks, OpenBSD port!) we have to track this
1320  * correctly.
1321  */
1322 static void
1323 iwm_stop_device(struct iwm_softc *sc)
1324 {
1325         struct ieee80211com *ic = &sc->sc_ic;
1326         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1327         int chnl, qid;
1328         uint32_t mask = 0;
1329
1330         /* tell the device to stop sending interrupts */
1331         iwm_disable_interrupts(sc);
1332
1333         /*
1334          * FreeBSD-local: mark the first vap as not-uploaded,
1335          * so the next transition through auth/assoc
1336          * will correctly populate the MAC context.
1337          */
1338         if (vap) {
1339                 struct iwm_vap *iv = IWM_VAP(vap);
1340                 iv->is_uploaded = 0;
1341         }
1342
1343         /* Device going down, stop using the ICT table. */
1344         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1345
1346         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1347
1348         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1349
1350         if (iwm_nic_lock(sc)) {
1351                 /* Stop each Tx DMA channel */
1352                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1353                         IWM_WRITE(sc,
1354                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1355                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1356                 }
1357
1358                 /* Wait for DMA channels to be idle */
1359                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1360                     5000)) {
1361                         device_printf(sc->sc_dev,
1362                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1363                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1364                 }
1365                 iwm_nic_unlock(sc);
1366         }
1367         iwm_disable_rx_dma(sc);
1368
1369         /* Stop RX ring. */
1370         iwm_reset_rx_ring(sc, &sc->rxq);
1371
1372         /* Reset all TX rings. */
1373         for (qid = 0; qid < nitems(sc->txq); qid++)
1374                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1375
1376         /*
1377          * Power-down device's busmaster DMA clocks
1378          */
1379         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1380         DELAY(5);
1381
1382         /* Make sure (redundantly) that we've released our request to stay awake */
1383         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1384             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1385
1386         /* Stop the device, and put it in low power state */
1387         iwm_apm_stop(sc);
1388
1389         /* stop and reset the on-board processor */
1390         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1391         DELAY(1000);
1392
1393         /*
1394          * Upon stop, the APM issues an interrupt if HW RF kill is set.
1395          * This is a bug in certain versions of the hardware.
1396          * Certain devices also keep sending the HW RF kill interrupt
1397          * continuously unless it is ACKed, even when the interrupt
1398          * should be masked.  Re-ACK all the interrupts here.
1399          */
1400         iwm_disable_interrupts(sc);
1401
1402         /*
1403          * Even if we stop the HW, we still want the RF kill
1404          * interrupt
1405          */
1406         iwm_enable_rfkill_int(sc);
1407         iwm_check_rfkill(sc);
1408 }
1409
1410 static void
1411 iwm_mvm_nic_config(struct iwm_softc *sc)
1412 {
1413         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1414         uint32_t reg_val = 0;
1415
1416         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1417             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1418         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1419             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1420         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1421             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1422
1423         /* SKU control */
1424         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1425             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1426         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1427             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1428
1429         /* radio configuration */
1430         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1431         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1432         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1433
1434         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1435
1436         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1437             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1438             radio_cfg_step, radio_cfg_dash);
1439
1440         /*
1441          * W/A: the NIC is stuck in a reset state after an early PCIe
1442          * power off (PCIe power is lost before PERST# is asserted),
1443          * causing the ME FW to lose ownership and be unable to regain it.
1444          */
1445         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1446                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1447                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1448                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1449         }
1450 }
1451
1452 static int
1453 iwm_nic_rx_init(struct iwm_softc *sc)
1454 {
1455         if (!iwm_nic_lock(sc))
1456                 return EBUSY;
1457
1458         /*
1459          * Initialize RX ring.  This is from the iwn driver.
1460          */
1461         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1462
1463         /* stop DMA */
1464         iwm_disable_rx_dma(sc);
1465         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1466         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1467         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1468         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1469
1470         /* Set physical address of RX ring (256-byte aligned). */
1471         IWM_WRITE(sc,
1472             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1473
1474         /* Set physical address of RX status (16-byte aligned). */
1475         IWM_WRITE(sc,
1476             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1477
1478 #if defined(__DragonFly__)
1479         /* Force serialization (probably not needed but don't trust the HW) */
1480         IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1481 #endif
1482
1483         /* Enable RX. */
1484         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1485             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1486             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1487             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1488             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1489             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1490             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1491             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1492
1493         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1494
1495         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1496         if (sc->host_interrupt_operation_mode)
1497                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1498
1499         /*
1500          * Thus sayeth el jefe (iwlwifi) via a comment:
1501          *
1502          * This value should initially be 0 (before preparing any
1503          * RBs), should be 8 after preparing the first 8 RBs (for example)
1504          */
1505         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1506
1507         iwm_nic_unlock(sc);
1508
1509         return 0;
1510 }
1511
1512 static int
1513 iwm_nic_tx_init(struct iwm_softc *sc)
1514 {
1515         int qid;
1516
1517         if (!iwm_nic_lock(sc))
1518                 return EBUSY;
1519
1520         /* Deactivate TX scheduler. */
1521         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1522
1523         /* Set physical address of "keep warm" page (16-byte aligned). */
1524         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1525
1526         /* Initialize TX rings. */
1527         for (qid = 0; qid < nitems(sc->txq); qid++) {
1528                 struct iwm_tx_ring *txq = &sc->txq[qid];
1529
1530                 /* Set physical address of TX ring (256-byte aligned). */
1531                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1532                     txq->desc_dma.paddr >> 8);
1533                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1534                     "%s: loading ring %d descriptors (%p) at %lx\n",
1535                     __func__,
1536                     qid, txq->desc,
1537                     (unsigned long) (txq->desc_dma.paddr >> 8));
1538         }
1539
1540         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1541
1542         iwm_nic_unlock(sc);
1543
1544         return 0;
1545 }
1546
1547 static int
1548 iwm_nic_init(struct iwm_softc *sc)
1549 {
1550         int error;
1551
1552         iwm_apm_init(sc);
1553         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1554                 iwm_set_pwr(sc);
1555
1556         iwm_mvm_nic_config(sc);
1557
1558         if ((error = iwm_nic_rx_init(sc)) != 0)
1559                 return error;
1560
1561         /*
1562          * Ditto for TX, from iwn
1563          */
1564         if ((error = iwm_nic_tx_init(sc)) != 0)
1565                 return error;
1566
1567         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1568             "%s: shadow registers enabled\n", __func__);
1569         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1570
1571         return 0;
1572 }
1573
1574 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1575         IWM_MVM_TX_FIFO_VO,
1576         IWM_MVM_TX_FIFO_VI,
1577         IWM_MVM_TX_FIFO_BE,
1578         IWM_MVM_TX_FIFO_BK,
1579 };
1580
1581 static int
1582 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1583 {
1584         if (!iwm_nic_lock(sc)) {
1585                 device_printf(sc->sc_dev,
1586                     "%s: cannot enable txq %d\n",
1587                     __func__,
1588                     qid);
1589                 return EBUSY;
1590         }
1591
1592         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1593
1594         if (qid == IWM_MVM_CMD_QUEUE) {
1595                 /* Deactivate before configuration. */
1596                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1597                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1598                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1599
1600                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1601
1602                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1603
1604                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1605                 /* Set scheduler window size and frame limit. */
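                /*
                 * Both fields are packed into the second 32-bit word of
                 * the queue's SCD context (hence the sizeof(uint32_t)
                 * offset); IWM_FRAME_LIMIT serves as the window size and
                 * the frame limit alike.
                 */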
1606                 iwm_write_mem32(sc,
1607                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1608                     sizeof(uint32_t),
1609                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1610                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1611                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1612                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1613
1614                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1615                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1616                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1617                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1618                     IWM_SCD_QUEUE_STTS_REG_MSK);
1619         } else {
1620                 struct iwm_scd_txq_cfg_cmd cmd;
1621                 int error;
1622
1623                 iwm_nic_unlock(sc);
1624
1625                 memset(&cmd, 0, sizeof(cmd));
1626                 cmd.scd_queue = qid;
1627                 cmd.enable = 1;
1628                 cmd.sta_id = sta_id;
1629                 cmd.tx_fifo = fifo;
1630                 cmd.aggregate = 0;
1631                 cmd.window = IWM_FRAME_LIMIT;
1632
1633                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1634                     sizeof(cmd), &cmd);
1635                 if (error) {
1636                         device_printf(sc->sc_dev,
1637                             "cannot enable txq %d\n", qid);
1638                         return error;
1639                 }
1640
1641                 if (!iwm_nic_lock(sc))
1642                         return EBUSY;
1643         }
1644
1645         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1646             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1647
1648         iwm_nic_unlock(sc);
1649
1650         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1651             __func__, qid, fifo);
1652
1653         return 0;
1654 }
1655
1656 static int
1657 iwm_post_alive(struct iwm_softc *sc)
1658 {
1659         int nwords;
1660         int error, chnl;
1661         uint32_t base;
1662
1663         if (!iwm_nic_lock(sc))
1664                 return EBUSY;
1665
1666         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1667         if (sc->sched_base != base) {
1668                 device_printf(sc->sc_dev,
1669                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1670                     __func__, sc->sched_base, base);
1671         }
1672
1673         iwm_ict_reset(sc);
1674
1675         /* Clear TX scheduler state in SRAM. */
1676         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1677             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1678             / sizeof(uint32_t);
1679         error = iwm_write_mem(sc,
1680             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1681             NULL, nwords);
1682         if (error)
1683                 goto out;
1684
1685         /* Set physical address of TX scheduler rings (1KB aligned). */
1686         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1687
1688         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1689
1690         iwm_nic_unlock(sc);
1691
1692         /* enable command channel */
1693         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1694         if (error)
1695                 return error;
1696
1697         if (!iwm_nic_lock(sc))
1698                 return EBUSY;
1699
1700         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1701
1702         /* Enable DMA channels. */
1703         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1704                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1705                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1706                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1707         }
1708
1709         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1710             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1711
1712         /* Enable L1-Active */
1713         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1714                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1715                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1716         }
1717
1718  out:
1719         iwm_nic_unlock(sc);
1720         return error;
1721 }
1722
1723 /*
1724  * NVM read access and content parsing.  We do not support
1725  * external NVM or writing NVM.
1726  * iwlwifi/mvm/nvm.c
1727  */
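/*
 * Rough call flow of the code below (a summary of existing behaviour,
 * not a specification):
 *   iwm_nvm_init()
 *     -> iwm_nvm_read_section()      once per NVM section
 *          -> iwm_nvm_read_chunk()   one IWM_NVM_ACCESS_CMD per chunk of
 *                                    IWM_NVM_DEFAULT_CHUNK_SIZE bytes
 *     -> iwm_parse_nvm_sections()
 *          -> iwm_parse_nvm_data()   extracts SKU, radio config, MAC
 *                                    address and channel flags
 */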
1728
1729 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
1730 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10
1731
1732 /* Default NVM size to read */
1733 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1734
1735 #define IWM_NVM_WRITE_OPCODE 1
1736 #define IWM_NVM_READ_OPCODE 0
1737
1738 /* load nvm chunk response */
1739 enum {
1740         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1741         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1742 };
1743
1744 static int
1745 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1746         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1747 {
1748         struct iwm_nvm_access_cmd nvm_access_cmd = {
1749                 .offset = htole16(offset),
1750                 .length = htole16(length),
1751                 .type = htole16(section),
1752                 .op_code = IWM_NVM_READ_OPCODE,
1753         };
1754         struct iwm_nvm_access_resp *nvm_resp;
1755         struct iwm_rx_packet *pkt;
1756         struct iwm_host_cmd cmd = {
1757                 .id = IWM_NVM_ACCESS_CMD,
1758                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1759                 .data = { &nvm_access_cmd, },
1760         };
1761         int ret, bytes_read, offset_read;
1762         uint8_t *resp_data;
1763
1764         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1765
1766         ret = iwm_send_cmd(sc, &cmd);
1767         if (ret) {
1768                 device_printf(sc->sc_dev,
1769                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1770                 return ret;
1771         }
1772
1773         pkt = cmd.resp_pkt;
1774
1775         /* Extract NVM response */
1776         nvm_resp = (void *)pkt->data;
1777         ret = le16toh(nvm_resp->status);
1778         bytes_read = le16toh(nvm_resp->length);
1779         offset_read = le16toh(nvm_resp->offset);
1780         resp_data = nvm_resp->data;
1781         if (ret) {
1782                 if ((offset != 0) &&
1783                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1784                         /*
1785                          * Meaning of NOT_VALID_ADDRESS: the driver tried to
1786                          * read a chunk from an address that is a multiple of
1787                          * 2K and got an error because that address is empty.
1788                          * Meaning of (offset != 0): the driver already read
1789                          * valid data from another chunk, so this case is not
1790                          * an error.
1791                          */
1792                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1793                                     "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1794                                     offset);
1795                         *len = 0;
1796                         ret = 0;
1797                 } else {
1798                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1799                                     "NVM access command failed with status %d\n", ret);
1800                         ret = EIO;
1801                 }
1802                 goto exit;
1803         }
1804
1805         if (offset_read != offset) {
1806                 device_printf(sc->sc_dev,
1807                     "NVM ACCESS response with invalid offset %d\n",
1808                     offset_read);
1809                 ret = EINVAL;
1810                 goto exit;
1811         }
1812
1813         if (bytes_read > length) {
1814                 device_printf(sc->sc_dev,
1815                     "NVM ACCESS response with too much data "
1816                     "(%d bytes requested, %d bytes received)\n",
1817                     length, bytes_read);
1818                 ret = EINVAL;
1819                 goto exit;
1820         }
1821
1822         /* Copy the chunk into the caller's NVM image buffer. */
1823         memcpy(data + offset, resp_data, bytes_read);
1824         *len = bytes_read;
1825
1826  exit:
1827         iwm_free_resp(sc, &cmd);
1828         return ret;
1829 }
1830
1831 /*
1832  * Reads an NVM section completely.
1833  * NICs prior to the 7000 family don't have a real NVM, but just read
1834  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1835  * by the uCode, we need to manually check in this case that we don't
1836  * overflow and try to read more than the EEPROM size.
1837  * For 7000 family NICs, we supply the maximal size we can read, and
1838  * the uCode fills the response with as much data as it can
1839  * without overflowing, so no check is needed.
1840  */
1841 static int
1842 iwm_nvm_read_section(struct iwm_softc *sc,
1843         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1844 {
1845         uint16_t seglen, length, offset = 0;
1846         int ret;
1847
1848         /* Set nvm section read length */
1849         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1850
1851         seglen = length;
1852
1853         /* Read the NVM until exhausted (reading less than requested) */
1854         while (seglen == length) {
1855                 /* Check no memory assumptions fail and cause an overflow */
1856                 if ((size_read + offset + length) >
1857                     sc->eeprom_size) {
1858                         device_printf(sc->sc_dev,
1859                             "EEPROM size is too small for NVM\n");
1860                         return ENOBUFS;
1861                 }
1862
1863                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1864                 if (ret) {
1865                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1866                                     "Cannot read NVM from section %d offset %d, length %d\n",
1867                                     section, offset, length);
1868                         return ret;
1869                 }
1870                 offset += seglen;
1871         }
1872
1873         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1874                     "NVM section %d read completed\n", section);
1875         *len = offset;
1876         return 0;
1877 }
1878
1879 /* NVM offsets (in words) definitions */
1880 enum iwm_nvm_offsets {
1881         /* NVM HW-Section offset (in words) definitions */
1882         IWM_HW_ADDR = 0x15,
1883
1884 /* NVM SW-Section offset (in words) definitions */
1885         IWM_NVM_SW_SECTION = 0x1C0,
1886         IWM_NVM_VERSION = 0,
1887         IWM_RADIO_CFG = 1,
1888         IWM_SKU = 2,
1889         IWM_N_HW_ADDRS = 3,
1890         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1891
1892 /* NVM calibration section offset (in words) definitions */
1893         IWM_NVM_CALIB_SECTION = 0x2B8,
1894         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1895 };
1896
1897 enum iwm_8000_nvm_offsets {
1898         /* NVM HW-Section offset (in words) definitions */
1899         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1900         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1901         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1902         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1903         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1904
1905         /* NVM SW-Section offset (in words) definitions */
1906         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1907         IWM_NVM_VERSION_8000 = 0,
1908         IWM_RADIO_CFG_8000 = 0,
1909         IWM_SKU_8000 = 2,
1910         IWM_N_HW_ADDRS_8000 = 3,
1911
1912         /* NVM REGULATORY-Section offset (in words) definitions */
1913         IWM_NVM_CHANNELS_8000 = 0,
1914         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1915         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1916         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1917
1918         /* NVM calibration section offset (in words) definitions */
1919         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1920         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1921 };
1922
1923 /* SKU Capabilities (actual values from NVM definition) */
1924 enum nvm_sku_bits {
1925         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1926         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1927         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1928         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1929 };
1930
1931 /* radio config bits (actual values from NVM definition) */
1932 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1933 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1934 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1935 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1936 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1937 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1938
1939 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1940 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1941 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1942 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1943 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1944 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
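/*
 * Worked example (an illustrative value, not taken from any real NVM):
 * a 7000-family radio config word of 0x0124 decodes as
 *   dash   = 0x0124 & 0x3         = 0
 *   step   = (0x0124 >> 2) & 0x3  = 1
 *   type   = (0x0124 >> 4) & 0x3  = 2
 *   pnum   = (0x0124 >> 6) & 0x3  = 0
 *   tx_ant = (0x0124 >> 8) & 0xF  = 0x1
 *   rx_ant = (0x0124 >> 12) & 0xF = 0x0
 * The 8000 family packs the same information into the wider fields of a
 * 32-bit word, as the _8000 variants above show.
 */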
1945
1946 #define DEFAULT_MAX_TX_POWER 16
1947
1948 /**
1949  * enum iwm_nvm_channel_flags - channel flags in NVM
1950  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1951  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1952  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1953  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1954  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1955  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1956  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1957  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1958  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1959  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1960  */
1961 enum iwm_nvm_channel_flags {
1962         IWM_NVM_CHANNEL_VALID = (1 << 0),
1963         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1964         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1965         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1966         IWM_NVM_CHANNEL_DFS = (1 << 7),
1967         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1968         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1969         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1970         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1971 };
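/*
 * For example (illustrative combinations): a channel whose NVM flags are
 * VALID|IBSS|ACTIVE translates to nflags == 0 in
 * iwm_eeprom_channel_flags() below, while a VALID-only channel becomes
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC.
 */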
1972
1973 /* lower blocks contain EEPROM image and calibration data */
1974 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
1975 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */
1976
1977 /*
1978  * Translate EEPROM flags to net80211.
1979  */
1980 static uint32_t
1981 iwm_eeprom_channel_flags(uint16_t ch_flags)
1982 {
1983         uint32_t nflags;
1984
1985         nflags = 0;
1986         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1987                 nflags |= IEEE80211_CHAN_PASSIVE;
1988         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1989                 nflags |= IEEE80211_CHAN_NOADHOC;
1990         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1991                 nflags |= IEEE80211_CHAN_DFS;
1992                 /* Just in case. */
1993                 nflags |= IEEE80211_CHAN_NOADHOC;
1994         }
1995
1996         return (nflags);
1997 }
1998
1999 static void
2000 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2001     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2002     const uint8_t bands[])
2003 {
2004         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2005         uint32_t nflags;
2006         uint16_t ch_flags;
2007         uint8_t ieee;
2008         int error;
2009
2010         for (; ch_idx < ch_num; ch_idx++) {
2011                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2012                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2013                         ieee = iwm_nvm_channels[ch_idx];
2014                 else
2015                         ieee = iwm_nvm_channels_8000[ch_idx];
2016
2017                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2018                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2019                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2020                             ieee, ch_flags,
2021                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2022                             "5.2" : "2.4");
2023                         continue;
2024                 }
2025
2026                 nflags = iwm_eeprom_channel_flags(ch_flags);
2027                 error = ieee80211_add_channel(chans, maxchans, nchans,
2028                     ieee, 0, 0, nflags, bands);
2029                 if (error != 0)
2030                         break;
2031
2032                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2033                     "Ch. %d Flags %x [%sGHz] - Added\n",
2034                     ieee, ch_flags,
2035                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2036                     "5.2" : "2.4");
2037         }
2038 }
2039
2040 static void
2041 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2042     struct ieee80211_channel chans[])
2043 {
2044         struct iwm_softc *sc = ic->ic_softc;
2045         struct iwm_nvm_data *data = sc->nvm_data;
2046         uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2047         size_t ch_num;
2048
2049         memset(bands, 0, sizeof(bands));
2050         /* 1-13: 11b/g channels. */
2051         setbit(bands, IEEE80211_MODE_11B);
2052         setbit(bands, IEEE80211_MODE_11G);
2053         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2054             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2055
2056         /* 14: 11b channel only. */
2057         clrbit(bands, IEEE80211_MODE_11G);
2058         iwm_add_channel_band(sc, chans, maxchans, nchans,
2059             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2060
2061         if (data->sku_cap_band_52GHz_enable) {
2062                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2063                         ch_num = nitems(iwm_nvm_channels);
2064                 else
2065                         ch_num = nitems(iwm_nvm_channels_8000);
2066                 memset(bands, 0, sizeof(bands));
2067                 setbit(bands, IEEE80211_MODE_11A);
2068                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2069                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2070         }
2071 }
2072
2073 static void
2074 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2075         const uint16_t *mac_override, const uint16_t *nvm_hw)
2076 {
2077         const uint8_t *hw_addr;
2078
2079         if (mac_override) {
2080                 static const uint8_t reserved_mac[] = {
2081                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2082                 };
2083
2084                 hw_addr = (const uint8_t *)(mac_override +
2085                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2086
2087                 /*
2088                  * Store the MAC address from the MAO section.
2089                  * No byte swapping is required in the MAO section.
2090                  */
2091                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2092
2093                 /*
2094                  * Force the use of the OTP MAC address in case of reserved MAC
2095                  * address in the NVM, or if address is given but invalid.
2096                  */
2097                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2098                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2099                     iwm_is_valid_ether_addr(data->hw_addr) &&
2100                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2101                         return;
2102
2103                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2104                     "%s: mac address from nvm override section invalid\n",
2105                     __func__);
2106         }
2107
2108         if (nvm_hw) {
2109                 /* read the mac address from WFMP registers */
2110                 uint32_t mac_addr0 =
2111                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2112                 uint32_t mac_addr1 =
2113                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2114
2115                 hw_addr = (const uint8_t *)&mac_addr0;
2116                 data->hw_addr[0] = hw_addr[3];
2117                 data->hw_addr[1] = hw_addr[2];
2118                 data->hw_addr[2] = hw_addr[1];
2119                 data->hw_addr[3] = hw_addr[0];
2120
2121                 hw_addr = (const uint8_t *)&mac_addr1;
2122                 data->hw_addr[4] = hw_addr[1];
2123                 data->hw_addr[5] = hw_addr[0];
2124
2125                 return;
2126         }
2127
2128         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2129         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2130 }
2131
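/*
 * The following getters hide a format difference: pre-8000 NVMs store
 * these fields as 16-bit little-endian words, while the 8000 family
 * stores them as 32-bit little-endian words and moves the SKU and radio
 * config into the PHY_SKU section.
 */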
2132 static int
2133 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2134             const uint16_t *phy_sku)
2135 {
2136         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2137                 return le16_to_cpup(nvm_sw + IWM_SKU);
2138
2139         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2140 }
2141
2142 static int
2143 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2144 {
2145         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2146                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2147         else
2148                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2149                                                 IWM_NVM_VERSION_8000));
2150 }
2151
2152 static int
2153 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2154                   const uint16_t *phy_sku)
2155 {
2156         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2157                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2158
2159         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2160 }
2161
2162 static int
2163 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2164 {
2165         int n_hw_addr;
2166
2167         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2168                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2169
2170         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2171
2172         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2173 }
2174
2175 static void
2176 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2177                   uint32_t radio_cfg)
2178 {
2179         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2180                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2181                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2182                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2183                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2184                 return;
2185         }
2186
2187         /* set the radio configuration for family 8000 */
2188         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2189         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2190         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2191         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2192         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2193         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2194 }
2195
2196 static int
2197 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2198                    const uint16_t *nvm_hw, const uint16_t *mac_override)
2199 {
2200 #ifdef notyet /* for FAMILY 9000 */
2201         if (cfg->mac_addr_from_csr) {
2202                 iwm_set_hw_address_from_csr(sc, data);
2203         } else
2204 #endif
2205         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2206                 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2207
2208                 /* The byte order is little endian 16 bit, meaning 214365 */
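                /* e.g. NVM bytes 00 11 22 33 44 55 yield 11:00:33:22:55:44 */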
2209                 data->hw_addr[0] = hw_addr[1];
2210                 data->hw_addr[1] = hw_addr[0];
2211                 data->hw_addr[2] = hw_addr[3];
2212                 data->hw_addr[3] = hw_addr[2];
2213                 data->hw_addr[4] = hw_addr[5];
2214                 data->hw_addr[5] = hw_addr[4];
2215         } else {
2216                 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2217         }
2218
2219         if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2220                 device_printf(sc->sc_dev, "no valid mac address was found\n");
2221                 return EINVAL;
2222         }
2223
2224         return 0;
2225 }
2226
2227 static struct iwm_nvm_data *
2228 iwm_parse_nvm_data(struct iwm_softc *sc,
2229                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2230                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2231                    const uint16_t *phy_sku, const uint16_t *regulatory)
2232 {
2233         struct iwm_nvm_data *data;
2234         uint32_t sku, radio_cfg;
2235
2236         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2237                 data = kmalloc(sizeof(*data) +
2238                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2239                     M_DEVBUF, M_WAITOK | M_ZERO);
2240         } else {
2241                 data = kmalloc(sizeof(*data) +
2242                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2243                     M_DEVBUF, M_WAITOK | M_ZERO);
2244         }
2245         if (!data)
2246                 return NULL;
2247
2248         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2249
2250         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2251         iwm_set_radio_cfg(sc, data, radio_cfg);
2252
2253         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2254         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2255         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2256         data->sku_cap_11n_enable = 0;
2257
2258         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2259
2260         /* If no valid MAC address was found, bail out. */
2261         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2262                 kfree(data, M_DEVBUF);
2263                 return NULL;
2264         }
2265
2266         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2267                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2268                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2269         } else {
2270                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2271                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2272         }
2273
2274         return data;
2275 }
2276
2277 static void
2278 iwm_free_nvm_data(struct iwm_nvm_data *data)
2279 {
2280         if (data != NULL)
2281                 kfree(data, M_DEVBUF);
2282 }
2283
2284 static struct iwm_nvm_data *
2285 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2286 {
2287         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2288
2289         /* Checking for required sections */
2290         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2291                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2292                     !sections[sc->nvm_hw_section_num].data) {
2293                         device_printf(sc->sc_dev,
2294                             "Can't parse empty OTP/NVM sections\n");
2295                         return NULL;
2296                 }
2297         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2298                 /* SW and REGULATORY sections are mandatory */
2299                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2300                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2301                         device_printf(sc->sc_dev,
2302                             "Can't parse empty OTP/NVM sections\n");
2303                         return NULL;
2304                 }
2305                 /* MAC_OVERRIDE or at least HW section must exist */
2306                 if (!sections[sc->nvm_hw_section_num].data &&
2307                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2308                         device_printf(sc->sc_dev,
2309                             "Can't parse mac_address, empty sections\n");
2310                         return NULL;
2311                 }
2312
2313                 /* PHY_SKU section is mandatory in B0 */
2314                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2315                         device_printf(sc->sc_dev,
2316                             "Can't parse phy_sku in B0, empty sections\n");
2317                         return NULL;
2318                 }
2319         } else {
2320                 panic("unknown device family %d\n", sc->sc_device_family);
2321         }
2322
2323         hw = (const uint16_t *) sections[sc->nvm_hw_section_num].data;
2324         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2325         calib = (const uint16_t *)
2326             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2327         regulatory = (const uint16_t *)
2328             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2329         mac_override = (const uint16_t *)
2330             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2331         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2332
2333         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2334             phy_sku, regulatory);
2335 }
2336
2337 static int
2338 iwm_nvm_init(struct iwm_softc *sc)
2339 {
2340         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2341         int i, ret, section;
2342         uint32_t size_read = 0;
2343         uint8_t *nvm_buffer, *temp;
2344         uint16_t len;
2345
2346         memset(nvm_sections, 0, sizeof(nvm_sections));
2347
2348         if (sc->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2349                 return EINVAL;
2350
2351         /* Load NVM values from the NIC (read from the FW NVM). */
2353         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2354
2355         nvm_buffer = kmalloc(sc->eeprom_size, M_DEVBUF, M_INTWAIT | M_ZERO);
2356         if (!nvm_buffer)
2357                 return ENOMEM;
2358         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2359                 /* we override the constness for initial read */
2360                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2361                                            &len, size_read);
2362                 if (ret)
2363                         continue;
2364                 size_read += len;
2365                 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2366                 if (!temp) {
2367                         ret = ENOMEM;
2368                         break;
2369                 }
2370                 memcpy(temp, nvm_buffer, len);
2371
2372                 nvm_sections[section].data = temp;
2373                 nvm_sections[section].length = len;
2374         }
2375         if (!size_read)
2376                 device_printf(sc->sc_dev, "OTP is blank\n");
2377         kfree(nvm_buffer, M_DEVBUF);
2378
2379         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2380         if (!sc->nvm_data)
2381                 return EINVAL;
2382         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2383                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2384
2385         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2386                 if (nvm_sections[i].data != NULL)
2387                         kfree(nvm_sections[i].data, M_DEVBUF);
2388         }
2389
2390         return 0;
2391 }
2392
2393 /*
2394  * Firmware loading gunk.  This is kind of a weird hybrid between the
2395  * iwn driver and the Linux iwlwifi driver.
2396  */
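/*
 * Call flow summary (descriptive only): iwm_start_fw() initializes the
 * NIC and calls iwm_load_firmware(), which dispatches to the
 * family-specific loader; both loaders end up in
 * iwm_firmware_load_sect(), which splits each section into DMA-able
 * chunks for iwm_firmware_load_chunk().
 */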
2397
2398 static int
2399 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2400         const uint8_t *section, uint32_t byte_cnt)
2401 {
2402         int error = EINVAL;
2403         uint32_t chunk_sz, offset;
2404
2405         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2406
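        /*
         * Each pass programs at most one maximum-sized transfer buffer;
         * MIN(chunk_sz, byte_cnt - offset) trims the final chunk to
         * whatever remains of the section.
         */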
2407         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2408                 uint32_t addr, len;
2409                 const uint8_t *data;
2410
2411                 addr = dst_addr + offset;
2412                 len = MIN(chunk_sz, byte_cnt - offset);
2413                 data = section + offset;
2414
2415                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2416                 if (error)
2417                         break;
2418         }
2419
2420         return error;
2421 }
2422
2423 static int
2424 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2425         const uint8_t *chunk, uint32_t byte_cnt)
2426 {
2427         struct iwm_dma_info *dma = &sc->fw_dma;
2428         int error;
2429
2430         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2431         memcpy(dma->vaddr, chunk, byte_cnt);
2432         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2433
2434         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2435             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2436                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2437                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2438         }
2439
2440         sc->sc_fw_chunk_done = 0;
2441
2442         if (!iwm_nic_lock(sc))
2443                 return EBUSY;
2444
2445         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2446             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2447         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2448             dst_addr);
2449         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2450             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2451         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2452             (iwm_get_dma_hi_addr(dma->paddr)
2453               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2454         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2455             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2456             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2457             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2458         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2459             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2460             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2461             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2462
2463         iwm_nic_unlock(sc);
2464
2465         /* wait 1s for this segment to load */
2466         error = 0;
2467         while (!sc->sc_fw_chunk_done) {
2468 #if defined(__DragonFly__)
2469                 error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2470 #else
2471                 error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2472 #endif
2473                 if (error)
2474                         break;
2475         }
2476
2477         if (!sc->sc_fw_chunk_done) {
2478                 device_printf(sc->sc_dev,
2479                     "fw chunk addr 0x%x len %d failed to load\n",
2480                     dst_addr, byte_cnt);
2481         }
2482
2483         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2484             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2485                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2486                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2487                 iwm_nic_unlock(sc);
2488         }
2489
2490         return error;
2491 }
2492
2493 int
2494 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2495     int cpu, int *first_ucode_section)
2496 {
2497         int shift_param;
2498         int i, error = 0, sec_num = 0x1;
2499         uint32_t val, last_read_idx = 0;
2500         const void *data;
2501         uint32_t dlen;
2502         uint32_t offset;
2503
2504         if (cpu == 1) {
2505                 shift_param = 0;
2506                 *first_ucode_section = 0;
2507         } else {
2508                 shift_param = 16;
2509                 (*first_ucode_section)++;
2510         }
2511
2512         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2513                 last_read_idx = i;
2514                 data = fws->fw_sect[i].fws_data;
2515                 dlen = fws->fw_sect[i].fws_len;
2516                 offset = fws->fw_sect[i].fws_devoff;
2517
2518                 /*
2519                  * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2520                  * CPU1 sections from the CPU2 sections.
2521                  * The PAGING_SEPARATOR_SECTION delimiter separates the
2522                  * non-paged CPU2 sections from the CPU2 paging sections.
2523                  */
2524                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2525                     offset == IWM_PAGING_SEPARATOR_SECTION)
2526                         break;
2527
2528                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2529                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2530                     i, offset, dlen, cpu);
2531
2532                 if (dlen > sc->sc_fwdmasegsz) {
2533                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2534                             "chunk %d too large (%d bytes)\n", i, dlen);
2535                         error = EFBIG;
2536                 } else {
2537                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2538                 }
2539                 if (error) {
2540                         device_printf(sc->sc_dev,
2541                             "could not load firmware chunk %d (error %d)\n",
2542                             i, error);
2543                         return error;
2544                 }
2545
2546                 /* Notify the ucode of the loaded section number and status */
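                /*
                 * sec_num walks the sequence 0x1, 0x3, 0x7, ... so the
                 * load-status register accumulates one set bit per loaded
                 * section; shift_param moves CPU2's bits into the upper
                 * half of the word.
                 */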
2547                 if (iwm_nic_lock(sc)) {
2548                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2549                         val = val | (sec_num << shift_param);
2550                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2551                         sec_num = (sec_num << 1) | 0x1;
2552                         iwm_nic_unlock(sc);
2553
2554                         /*
2555                          * The firmware won't load correctly without this delay.
2556                          */
2557                         DELAY(8000);
2558                 }
2559         }
2560
2561         *first_ucode_section = last_read_idx;
2562
2563         if (iwm_nic_lock(sc)) {
2564                 if (cpu == 1)
2565                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2566                 else
2567                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2568                 iwm_nic_unlock(sc);
2569         }
2570
2571         return 0;
2572 }
2573
2574 int
2575 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2576 {
2577         struct iwm_fw_sects *fws;
2578         int error = 0;
2579         int first_ucode_section;
2580
2581         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2582             ucode_type);
2583
2584         fws = &sc->sc_fw.fw_sects[ucode_type];
2585
2586         /* configure the ucode to be ready to get the secured image */
2587         /* release CPU reset */
2588         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2589
2590         /* Load the binary secured sections of CPU1 into the FW. */
2591         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2592         if (error)
2593                 return error;
2594
2595         /* Load the binary sections of CPU2 into the FW. */
2596         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2597 }
2598
2599 static int
2600 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2601 {
2602         struct iwm_fw_sects *fws;
2603         int error, i;
2604         const void *data;
2605         uint32_t dlen;
2606         uint32_t offset;
2607
2608         sc->sc_uc.uc_intr = 0;
2609
2610         fws = &sc->sc_fw.fw_sects[ucode_type];
2611         for (i = 0; i < fws->fw_count; i++) {
2612                 data = fws->fw_sect[i].fws_data;
2613                 dlen = fws->fw_sect[i].fws_len;
2614                 offset = fws->fw_sect[i].fws_devoff;
2615                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2616                     "LOAD FIRMWARE type %d offset %u len %d\n",
2617                     ucode_type, offset, dlen);
2618                 if (dlen > sc->sc_fwdmasegsz) {
2619                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2620                             "chunk %d too large (%d bytes)\n", i, dlen);
2621                         error = EFBIG;
2622                 } else {
2623                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2624                 }
2625                 if (error) {
2626                         device_printf(sc->sc_dev,
2627                             "could not load firmware chunk %u of %u "
2628                             "(error=%d)\n", i, fws->fw_count, error);
2629                         return error;
2630                 }
2631         }
2632
2633         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2634
2635         return 0;
2636 }
2637
2638 static int
2639 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2640 {
2641         int error, w;
2642
2643         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2644                 error = iwm_load_firmware_8000(sc, ucode_type);
2645         else
2646                 error = iwm_load_firmware_7000(sc, ucode_type);
2647         if (error)
2648                 return error;
2649
2650         /* wait for the firmware to load */
2651         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2652 #if defined(__DragonFly__)
2653                 error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2654 #else
2655                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2656 #endif
2657         }
2658         if (error || !sc->sc_uc.uc_ok) {
2659                 device_printf(sc->sc_dev, "could not load firmware\n");
2660                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2661                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2662                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2663                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2664                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2665                 }
2666         }
2667
2668         /*
2669          * Give the firmware some time to initialize.
2670          * Accessing it too early causes errors.
2671          */
2672         lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2673
2674         return error;
2675 }
2676
2677 static int
2678 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2679 {
2680         int error;
2681
2682         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2683
2684         if ((error = iwm_nic_init(sc)) != 0) {
2685                 device_printf(sc->sc_dev, "unable to init nic\n");
2686                 return error;
2687         }
2688
2689         /* make sure rfkill handshake bits are cleared */
2690         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2691         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2692             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2693
2694         /* clear (again), then enable host interrupts */
2695         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2696         iwm_enable_interrupts(sc);
2697
2698         /* really make sure rfkill handshake bits are cleared */
2699         /* maybe we should write a few times more?  just to make sure */
2700         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2701         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702
2703         /* Load the given image to the HW */
2704         return iwm_load_firmware(sc, ucode_type);
2705 }
2706
2707 static int
2708 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2709 {
2710         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2711                 .valid = htole32(valid_tx_ant),
2712         };
2713
2714         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2715             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2716 }
2717
2718 static int
2719 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2720 {
2721         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2722         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2723
2724         /* Set parameters */
2725         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2726         phy_cfg_cmd.calib_control.event_trigger =
2727             sc->sc_default_calib[ucode_type].event_trigger;
2728         phy_cfg_cmd.calib_control.flow_trigger =
2729             sc->sc_default_calib[ucode_type].flow_trigger;
2730
2731         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2732             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2733         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2734             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2735 }
2736
2737 static int
2738 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2739         enum iwm_ucode_type ucode_type)
2740 {
2741         enum iwm_ucode_type old_type = sc->sc_uc_current;
2742         int error;
2743
2744         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2745                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2746                         error);
2747                 return error;
2748         }
2749
2750         sc->sc_uc_current = ucode_type;
2751         error = iwm_start_fw(sc, ucode_type);
2752         if (error) {
2753                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2754                 sc->sc_uc_current = old_type;
2755                 return error;
2756         }
2757
2758         error = iwm_post_alive(sc);
2759         if (error) {
2760                 device_printf(sc->sc_dev, "iwm_post_alive: failed %d\n", error);
2761         }
2762         return error;
2763 }
2764
2765 /*
2766  * mvm misc bits
2767  */
2768
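/*
 * Bring up the INIT firmware and run its calibrations.  The sequence
 * below: load the init ucode and wait for "alive"; if justnvm is set,
 * stop after reading the NVM and copying the MAC address; otherwise
 * send the BT coex, Smart FIFO, TX antenna and PHY configuration
 * commands, then sleep until the firmware's init-complete notification.
 */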
2769 static int
2770 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2771 {
2772         int error;
2773
2774         /* do not operate with rfkill switch turned on */
2775         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2776                 device_printf(sc->sc_dev,
2777                     "radio is disabled by hardware switch\n");
2778                 return EPERM;
2779         }
2780
2781         sc->sc_init_complete = 0;
2782         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2783             IWM_UCODE_TYPE_INIT)) != 0) {
2784                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2785                 return error;
2786         }
2787
2788         if (justnvm) {
2789                 if ((error = iwm_nvm_init(sc)) != 0) {
2790                         device_printf(sc->sc_dev, "failed to read nvm\n");
2791                         return error;
2792                 }
2793                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2794
2795                 return 0;
2796         }
2797
2798         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2799                 device_printf(sc->sc_dev,
2800                     "failed to send bt coex configuration: %d\n", error);
2801                 return error;
2802         }
2803
2804         /* Init Smart FIFO. */
2805         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2806         if (error != 0)
2807                 return error;
2808
2809         /* Send TX valid antennas before triggering calibrations */
2810         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2811                 device_printf(sc->sc_dev,
2812                     "failed to send antennas before calibration: %d\n", error);
2813                 return error;
2814         }
2815
2816         /*
2817          * Send phy configurations command to init uCode
2818          * to start the 16.0 uCode init image internal calibrations.
2819          */
2820         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2821                 device_printf(sc->sc_dev,
2822                     "%s: failed to run internal calibration: %d\n",
2823                     __func__, error);
2824                 return error;
2825         }
2826
2827         /*
2828          * Nothing to do but wait for the init complete notification
2829          * from the firmware
2830          */
2831         while (!sc->sc_init_complete) {
2832 #if defined(__DragonFly__)
2833                 error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
2834                                  0, "iwminit", 2*hz);
2835 #else
2836                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2837                                  0, "iwminit", 2*hz);
2838 #endif
2839                 if (error) {
2840                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2841                                 error);
2842                         break;
2843                 }
2844         }
2845
2846         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2847             sc->sc_init_complete ? "" : "not ");
2848
2849         return error;
2850 }
2851
2852 /*
2853  * receive side
2854  */
2855
2856 /* (re)stock rx ring, called at init-time and at runtime */
2857 static int
2858 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2859 {
2860         struct iwm_rx_ring *ring = &sc->rxq;
2861         struct iwm_rx_data *data = &ring->data[idx];
2862         struct mbuf *m;
2863         bus_dmamap_t dmamap = NULL;
2864         bus_dma_segment_t seg;
2865         int nsegs, error;
2866
2867         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2868         if (m == NULL)
2869                 return ENOBUFS;
2870
2871         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2872 #if defined(__DragonFly__)
2873         error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
2874             m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2875 #else
2876         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2877             &seg, &nsegs, BUS_DMA_NOWAIT);
2878 #endif
2879         if (error != 0) {
2880                 device_printf(sc->sc_dev,
2881                     "%s: can't map mbuf, error %d\n", __func__, error);
2882                 goto fail;
2883         }
2884
2885         if (data->m != NULL)
2886                 bus_dmamap_unload(ring->data_dmat, data->map);
2887
2888         /* Swap ring->spare_map with data->map */
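        /*
         * The mbuf was loaded into the spare map above, so a mapping
         * failure leaves this slot's old buffer mapped and the ring
         * intact; only on success do we swap the maps.
         */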
2889         dmamap = data->map;
2890         data->map = ring->spare_map;
2891         ring->spare_map = dmamap;
2892
2893         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2894         data->m = m;
2895
2896         /* Update RX descriptor. */
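        /*
         * The descriptor holds the DMA address shifted right by 8 bits,
         * which is why the buffer must be 256-byte aligned (asserted
         * below) and why a 32-bit descriptor word can cover a wider
         * physical address range.
         */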
2897         KASSERT((seg.ds_addr & 255) == 0, ("RX buffer not 256-byte aligned"));
2898         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2899         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2900             BUS_DMASYNC_PREWRITE);
2901
2902         return 0;
2903 fail:
2904         m_freem(m);
2905         return error;
2906 }
2907
2908 #define IWM_RSSI_OFFSET 50
2909 static int
2910 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2911 {
2912         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2913         uint32_t agc_a, agc_b;
2914         uint32_t val;
2915
2916         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2917         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2918         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2919
2920         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2921         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2922         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2923
2924         /*
2925          * dBm = rssi dB - agc dB - constant.
2926          * Higher AGC (higher radio gain) means lower signal.
2927          */
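        /*
         * Worked example (illustrative values): with IWM_RSSI_OFFSET 50,
         * rssi_a = 50 and agc_a = 30 yield 50 - 50 - 30 = -30 dBm.
         */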
2928         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2929         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2930         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2931
2932         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2933             "RSSI in A %d B %d max %d AGC A %d AGC B %d\n",
2934             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2935
2936         return max_rssi_dbm;
2937 }
2938
2939 /*
2940  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2941  * values are reported by the fw as positive values - need to negate
2942  * to obtain their dBm.  Account for missing antennas by replacing 0
2943  * values by -256dBm: practically zero power and an infeasible 8-bit value.
2944  */
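/*
 * For example (illustrative values only): a reported energy of 35 for
 * antenna A becomes -35 dBm, while a 0 reading (missing antenna) is
 * replaced by -256.
 */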
2945 static int
2946 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2947 {
2948         int energy_a, energy_b, energy_c, max_energy;
2949         uint32_t val;
2950
2951         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2952         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2953             IWM_RX_INFO_ENERGY_ANT_A_POS;
2954         energy_a = energy_a ? -energy_a : -256;
2955         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2956             IWM_RX_INFO_ENERGY_ANT_B_POS;
2957         energy_b = energy_b ? -energy_b : -256;
2958         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2959             IWM_RX_INFO_ENERGY_ANT_C_POS;
2960         energy_c = energy_c ? -energy_c : -256;
2961         max_energy = MAX(energy_a, energy_b);
2962         max_energy = MAX(max_energy, energy_c);
2963
2964         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2965             "energy in A %d B %d C %d, max %d\n",
2966             energy_a, energy_b, energy_c, max_energy);
2967
2968         return max_energy;
2969 }
2970
2971 static void
2972 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2973         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2974 {
2975         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2976
2977         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2978         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2979
2980         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2981 }
2982
2983 /*
2984  * Retrieve the average noise (in dBm) among receivers.
2985  */
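/*
 * For example (illustrative values): beacon silence readings of
 * { 40, 42, 0 } give total = 82 over nbant = 2 antennas, so the
 * returned noise is 82 / 2 - 107 = -66 dBm.
 */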
2986 static int
2987 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2988 {
2989         int i, total, nbant, noise;
2990
2991         total = nbant = noise = 0;
2992         for (i = 0; i < 3; i++) {
2993                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2994                 if (noise) {
2995                         total += noise;
2996                         nbant++;
2997                 }
2998         }
2999
3000         /* There should be at least one antenna but check anyway. */
3001         return (nbant == 0) ? -127 : (total / nbant) - 107;
3002 }
3003
3004 /*
3005  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3006  *
3007  * Handles the actual data of the Rx packet from the fw
3008  */
3009 static void
3010 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3011         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3012 {
3013         struct ieee80211com *ic = &sc->sc_ic;
3014         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3015         struct ieee80211_frame *wh;
3016         struct ieee80211_node *ni;
3017         struct ieee80211_rx_stats rxs;
3018         struct mbuf *m;
3019         struct iwm_rx_phy_info *phy_info;
3020         struct iwm_rx_mpdu_res_start *rx_res;
3021         uint32_t len;
3022         uint32_t rx_pkt_status;
3023         int rssi;
3024
3025         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3026
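        /*
         * Layout of the RX unit, as parsed below: a struct
         * iwm_rx_mpdu_res_start header, the 802.11 frame itself
         * (byte_count bytes), then a 32-bit rx_pkt_status word.
         */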
3027         phy_info = &sc->sc_last_phy_info;
3028         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3029         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3030         len = le16toh(rx_res->byte_count);
3031         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3032
3033         m = data->m;
3034         m->m_data = pkt->data + sizeof(*rx_res);
3035         m->m_pkthdr.len = m->m_len = len;
3036
3037         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3038                 device_printf(sc->sc_dev,
3039                     "dsp size out of range [0,20]: %d\n",
3040                     phy_info->cfg_phy_cnt);
3041                 return;
3042         }
3043
3044         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3045             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3046                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3047                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3048                 return; /* drop */
3049         }
3050
3051         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3052                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3053         } else {
3054                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3055         }
3056         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
3057         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
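        /*
         * E.g., assuming the usual IWM_MIN_DBM of -100: an rssi of
         * -60 dBm normalizes above to (0 - (-100)) + (-60) = 40 before
         * being clipped to sc_max_rssi.
         */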
3058
3059         /* replenish ring for the buffer we're going to feed to the sharks */
3060         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3061                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3062                     __func__);
3063                 return;
3064         }
3065
3066         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3067
3068         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3069             "%s: phy_info: channel=%d, flags=0x%08x\n",
3070             __func__,
3071             le16toh(phy_info->channel),
3072             le16toh(phy_info->phy_flags));
3073
3074         /*
3075          * Populate an RX state struct with the provided information.
3076          */
3077         bzero(&rxs, sizeof(rxs));
3078         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3079         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3080         rxs.c_ieee = le16toh(phy_info->channel);
3081         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3082                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3083         } else {
3084                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3085         }
3086         rxs.rssi = rssi - sc->sc_noise;
3087         rxs.nf = sc->sc_noise;
3088
3089         if (ieee80211_radiotap_active_vap(vap)) {
3090                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3091
3092                 tap->wr_flags = 0;
3093                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3094                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3095                 tap->wr_chan_freq = htole16(rxs.c_freq);
3096                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3097                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3098                 tap->wr_dbm_antsignal = (int8_t)rssi;
3099                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3100                 tap->wr_tsft = phy_info->system_timestamp;
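                /*
                 * phy_info->rate is the firmware's PLCP rate code: CCK
                 * rates in 100 kb/s units (10 = 1 Mb/s), OFDM rates as
                 * 802.11a signal codes (0xd = 6 Mb/s); wr_rate is in
                 * 500 kb/s units.
                 */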
3101                 switch (phy_info->rate) {
3102                 /* CCK rates. */
3103                 case  10: tap->wr_rate =   2; break;
3104                 case  20: tap->wr_rate =   4; break;
3105                 case  55: tap->wr_rate =  11; break;
3106                 case 110: tap->wr_rate =  22; break;
3107                 /* OFDM rates. */
3108                 case 0xd: tap->wr_rate =  12; break;
3109                 case 0xf: tap->wr_rate =  18; break;
3110                 case 0x5: tap->wr_rate =  24; break;
3111                 case 0x7: tap->wr_rate =  36; break;
3112                 case 0x9: tap->wr_rate =  48; break;
3113                 case 0xb: tap->wr_rate =  72; break;
3114                 case 0x1: tap->wr_rate =  96; break;
3115                 case 0x3: tap->wr_rate = 108; break;
3116                 /* Unknown rate: should not happen. */
3117                 default:  tap->wr_rate =   0;
3118                 }
3119         }
3120
3121         IWM_UNLOCK(sc);
3122         if (ni != NULL) {
3123                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3124                 ieee80211_input_mimo(ni, m, &rxs);
3125                 ieee80211_free_node(ni);
3126         } else {
3127                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3128                 ieee80211_input_mimo_all(ic, m, &rxs);
3129         }
3130         IWM_LOCK(sc);
3131 }
3132
3133 static int
3134 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3135         struct iwm_node *in)
3136 {
3137         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3138         struct ieee80211_node *ni = &in->in_ni;
3139         struct ieee80211vap *vap = ni->ni_vap;
3140         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3141         int failack = tx_resp->failure_frame;
3142
3143         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3144
3145         /* Update rate control statistics. */
3146         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3147             __func__,
3148             (int) le16toh(tx_resp->status.status),
3149             (int) le16toh(tx_resp->status.sequence),
3150             tx_resp->frame_count,
3151             tx_resp->bt_kill_count,
3152             tx_resp->failure_rts,
3153             tx_resp->failure_frame,
3154             le32toh(tx_resp->initial_rate),
3155             (int) le16toh(tx_resp->wireless_media_time));
3156
3157         if (status != IWM_TX_STATUS_SUCCESS &&
3158             status != IWM_TX_STATUS_DIRECT_DONE) {
3159                 ieee80211_ratectl_tx_complete(vap, ni,
3160                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3161                 return (1);
3162         } else {
3163                 ieee80211_ratectl_tx_complete(vap, ni,
3164                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3165                 return (0);
3166         }
3167 }
3168
3169 static void
3170 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3171         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3172 {
3173         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3174         int idx = cmd_hdr->idx;
3175         int qid = cmd_hdr->qid;
3176         struct iwm_tx_ring *ring = &sc->txq[qid];
3177         struct iwm_tx_data *txd = &ring->data[idx];
3178         struct iwm_node *in = txd->in;
3179         struct mbuf *m = txd->m;
3180         int status;
3181
3182         KASSERT(txd->done == 0, ("txd not done"));
3183         KASSERT(txd->in != NULL, ("txd without node"));
3184         KASSERT(txd->m != NULL, ("txd without mbuf"));
3185
3186         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3187
3188         sc->sc_tx_timer = 0;
3189
3190         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3191
3192         /* Unmap and free mbuf. */
3193         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3194         bus_dmamap_unload(ring->data_dmat, txd->map);
3195
3196         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3197             "free txd %p, in %p\n", txd, txd->in);
3198         txd->done = 1;
3199         txd->m = NULL;
3200         txd->in = NULL;
3201
3202         ieee80211_tx_complete(&in->in_ni, m, status);
3203
3204         if (--ring->queued < IWM_TX_RING_LOMARK) {
3205                 sc->qfullmsk &= ~(1 << ring->qid);
3206                 if (sc->qfullmsk == 0) {
3207                         /*
3208                          * Well, we're in interrupt context, but then again
3209                          * I guess net80211 does all sorts of stunts in
3210                          * interrupt context, so maybe this is no biggie.
3211                          */
3212                         iwm_start(sc);
3213                 }
3214         }
3215 }
3216
3217 /*
3218  * transmit side
3219  */
3220
3221 /*
3222  * Process a "command done" firmware notification.  This is where we wake up
3223  * processes waiting for a synchronous command completion.
3224  * Adapted from if_iwn.
3225  */
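/*
 * The synchronous sender is assumed to sleep on the same
 * &ring->desc[idx] address that the wakeup() below targets.
 */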
3226 static void
3227 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3228 {
3229         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3230         struct iwm_tx_data *data;
3231
3232         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3233                 return; /* Not a command ack. */
3234         }
3235
3236         data = &ring->data[pkt->hdr.idx];
3237
3238         /* If the command was mapped in an mbuf, free it. */
3239         if (data->m != NULL) {
3240                 bus_dmamap_sync(ring->data_dmat, data->map,
3241                     BUS_DMASYNC_POSTWRITE);
3242                 bus_dmamap_unload(ring->data_dmat, data->map);
3243                 m_freem(data->m);
3244                 data->m = NULL;
3245         }
3246         wakeup(&ring->desc[pkt->hdr.idx]);
3247 }
3248
3249 #if 0
3250 /*
3251  * necessary only for block ack mode
3252  */
3253 void
3254 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3255         uint16_t len)
3256 {
3257         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3258         uint16_t w_val;
3259
3260         scd_bc_tbl = sc->sched_dma.vaddr;
3261
3262         len += 8; /* magic numbers came naturally from Paris */
3263         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3264                 len = roundup(len, 4) / 4;
3265
3266         w_val = htole16(sta_id << 12 | len);
3267
3268         /* Update TX scheduler. */
3269         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3270         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3271             BUS_DMASYNC_PREWRITE);
3272
3273         /* I really wonder what this is ?!? */
3274         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3275                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3276                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3277                     BUS_DMASYNC_PREWRITE);
3278         }
3279 }
3280 #endif
3281
3282 /*
3283  * Take an 802.11 (non-n) rate, find the relevant rate
3284  * table entry.  return the index into in_ridx[].
3285  *
3286  * The caller then uses that index back into in_ridx
3287  * to figure out the rate index programmed /into/
3288  * the firmware for this given node.
3289  */
3290 static int
3291 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3292     uint8_t rate)
3293 {
3294         int i;
3295         uint8_t r;
3296
3297         for (i = 0; i < nitems(in->in_ridx); i++) {
3298                 r = iwm_rates[in->in_ridx[i]].rate;
3299                 if (rate == r)
3300                         return (i);
3301         }
3302         /* XXX Return the first */
3303         /* XXX TODO: have it return the /lowest/ */
3304         return (0);
3305 }
3306
3307 /*
3308  * Fill in the rate related information for a transmit command.
3309  */
3310 static const struct iwm_rate *
3311 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3312         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3313 {
3314         struct ieee80211com *ic = &sc->sc_ic;
3315         struct ieee80211_node *ni = &in->in_ni;
3316         const struct iwm_rate *rinfo;
3317         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3318         int ridx, rate_flags;
3319
3320         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3321         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3322
3323         /*
3324          * XXX TODO: everything about the rate selection here is terrible!
3325          */
3326
3327         if (type == IEEE80211_FC0_TYPE_DATA) {
3328                 int i;
3329                 /* for data frames, use RS table */
3330                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3331                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3332                 ridx = in->in_ridx[i];
3333
3334                 /* This is the index into the programmed table */
3335                 tx->initial_rate_index = i;
3336                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3337                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3338                     "%s: start with i=%d, txrate %d\n",
3339                     __func__, i, iwm_rates[ridx].rate);
3340         } else {
3341                 /*
3342                  * For non-data, use the lowest supported rate for the given
3343                  * operational mode.
3344                  *
3345                  * Note: there may not be any rate control information available.
3346                  * This driver currently assumes that if we're transmitting
3347                  * data frames, the rate control table applies.  Grr.
3348                  *
3349                  * XXX TODO: use the configured rate for the traffic type!
3350                  * XXX TODO: this should be per-vap, not curmode; later on
3351                  * we'll want to handle off-channel stuff (eg TDLS).
3352                  */
3353                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3354                         /*
3355                          * XXX this assumes the mode is either 11a or not 11a;
3356                          * definitely won't work for 11n.
3357                          */
3358                         ridx = IWM_RIDX_OFDM;
3359                 } else {
3360                         ridx = IWM_RIDX_CCK;
3361                 }
3362         }
3363
3364         rinfo = &iwm_rates[ridx];
3365
3366         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3367             __func__, ridx,
3368             rinfo->rate,
3369             !! (IWM_RIDX_IS_CCK(ridx))
3370             );
3371
3372         /* XXX TODO: hard-coded TX antenna? */
3373         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3374         if (IWM_RIDX_IS_CCK(ridx))
3375                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3376         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3377
3378         return rinfo;
3379 }
3380
3381 #define TB0_SIZE 16
3382 static int
3383 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3384 {
3385         struct ieee80211com *ic = &sc->sc_ic;
3386         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3387         struct iwm_node *in = IWM_NODE(ni);
3388         struct iwm_tx_ring *ring;
3389         struct iwm_tx_data *data;
3390         struct iwm_tfd *desc;
3391         struct iwm_device_cmd *cmd;
3392         struct iwm_tx_cmd *tx;
3393         struct ieee80211_frame *wh;
3394         struct ieee80211_key *k = NULL;
3395 #if !defined(__DragonFly__)
3396         struct mbuf *m1;
3397 #endif
3398         const struct iwm_rate *rinfo;
3399         uint32_t flags;
3400         u_int hdrlen;
3401         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3402         int nsegs;
3403         uint8_t tid, type;
3404         int i, totlen, error, pad;
3405
3406         wh = mtod(m, struct ieee80211_frame *);
3407         hdrlen = ieee80211_anyhdrsize(wh);
3408         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3409         tid = 0;
3410         ring = &sc->txq[ac];
3411         desc = &ring->desc[ring->cur];
3412         memset(desc, 0, sizeof(*desc));
3413         data = &ring->data[ring->cur];
3414
3415         /* Fill out iwm_tx_cmd to send to the firmware */
3416         cmd = &ring->cmd[ring->cur];
3417         cmd->hdr.code = IWM_TX_CMD;
3418         cmd->hdr.flags = 0;
3419         cmd->hdr.qid = ring->qid;
3420         cmd->hdr.idx = ring->cur;
3421
3422         tx = (void *)cmd->data;
3423         memset(tx, 0, sizeof(*tx));
3424
3425         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3426
3427         /* Encrypt the frame if need be. */
3428         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3429                 /* Retrieve key for TX && do software encryption. */
3430                 k = ieee80211_crypto_encap(ni, m);
3431                 if (k == NULL) {
3432                         m_freem(m);
3433                         return (ENOBUFS);
3434                 }
3435                 /* 802.11 header may have moved. */
3436                 wh = mtod(m, struct ieee80211_frame *);
3437         }
3438
3439         if (ieee80211_radiotap_active_vap(vap)) {
3440                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3441
3442                 tap->wt_flags = 0;
3443                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3444                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3445                 tap->wt_rate = rinfo->rate;
3446                 if (k != NULL)
3447                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3448                 ieee80211_radiotap_tx(vap, m);
3449         }
3450
3452         totlen = m->m_pkthdr.len;
3453
3454         flags = 0;
3455         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3456                 flags |= IWM_TX_CMD_FLG_ACK;
3457         }
3458
3459         if (type == IEEE80211_FC0_TYPE_DATA
3460             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3461             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3462                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3463         }
3464
3465         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3466             type != IEEE80211_FC0_TYPE_DATA)
3467                 tx->sta_id = sc->sc_aux_sta.sta_id;
3468         else
3469                 tx->sta_id = IWM_STATION_ID;
3470
3471         if (type == IEEE80211_FC0_TYPE_MGT) {
3472                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3473
3474                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3475                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3476                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3477                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3478                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3479                 } else {
3480                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3481                 }
3482         } else {
3483                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3484         }
3485
3486         if (hdrlen & 3) {
3487                 /* First segment length must be a multiple of 4. */
3488                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3489                 pad = 4 - (hdrlen & 3);
3490         } else
3491                 pad = 0;
3492
3493         tx->driver_txop = 0;
3494         tx->next_frame_len = 0;
3495
3496         tx->len = htole16(totlen);
3497         tx->tid_tspec = tid;
3498         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3499
3500         /* Set physical address of "scratch area". */
3501         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3502         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3503
3504         /* Copy 802.11 header in TX command. */
3505         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3506
3507         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3508
3509         tx->sec_ctl = 0;
3510         tx->tx_flags |= htole32(flags);
3511
3512         /* Trim 802.11 header. */
3513         m_adj(m, hdrlen);
3514 #if defined(__DragonFly__)
3515         error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3516                                             segs, IWM_MAX_SCATTER - 2,
3517                                             &nsegs, BUS_DMA_NOWAIT);
3518 #else
3519         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3520             segs, &nsegs, BUS_DMA_NOWAIT);
3521 #endif
3522         if (error != 0) {
3523 #if defined(__DragonFly__)
3524                 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3525                     error);
3526                 m_freem(m);
3527                 return error;
3528 #else
3529                 if (error != EFBIG) {
3530                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3531                             error);
3532                         m_freem(m);
3533                         return error;
3534                 }
3535                 /* Too many DMA segments, linearize mbuf. */
3536                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3537                 if (m1 == NULL) {
3538                         device_printf(sc->sc_dev,
3539                             "%s: could not defrag mbuf\n", __func__);
3540                         m_freem(m);
3541                         return (ENOBUFS);
3542                 }
3543                 m = m1;
3544
3545                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3546                     segs, &nsegs, BUS_DMA_NOWAIT);
3547                 if (error != 0) {
3548                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3549                             error);
3550                         m_freem(m);
3551                         return error;
3552                 }
3553 #endif
3554         }
3555         data->m = m;
3556         data->in = in;
3557         data->done = 0;
3558
3559         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3560             "sending txd %p, in %p\n", data, data->in);
3561         KASSERT(data->in != NULL, ("node is NULL"));
3562
3563         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3564             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3565             ring->qid, ring->cur, totlen, nsegs,
3566             le32toh(tx->tx_flags),
3567             le32toh(tx->rate_n_flags),
3568             tx->initial_rate_index
3569             );
3570
3571         /* Fill TX descriptor. */
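        /*
         * Buffer layout, a sketch of what's programmed below: TB0 covers
         * the first TB0_SIZE (16) bytes of the command, TB1 the remainder
         * of the command header, TX command and (padded) 802.11 header,
         * and TB2..TB(nsegs+1) the payload segments; hence 2 + nsegs TBs.
         */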
3572         desc->num_tbs = 2 + nsegs;
3573
3574         desc->tbs[0].lo = htole32(data->cmd_paddr);
3575         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3576             (TB0_SIZE << 4);
3577         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3578         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3579             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3580               + hdrlen + pad - TB0_SIZE) << 4);
3581
3582         /* Other DMA segments are for data payload. */
3583         for (i = 0; i < nsegs; i++) {
3584                 seg = &segs[i];
3585                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3586                 desc->tbs[i+2].hi_n_len =
3587                     htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3588                     ((seg->ds_len) << 4);
3589         }
3590
3591         bus_dmamap_sync(ring->data_dmat, data->map,
3592             BUS_DMASYNC_PREWRITE);
3593         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3594             BUS_DMASYNC_PREWRITE);
3595         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3596             BUS_DMASYNC_PREWRITE);
3597
3598 #if 0
3599         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3600 #endif
3601
3602         /* Kick TX ring. */
3603         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3604         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3605
3606         /* Mark TX ring as full if we reach a certain threshold. */
3607         if (++ring->queued > IWM_TX_RING_HIMARK) {
3608                 sc->qfullmsk |= 1 << ring->qid;
3609         }
3610
3611         return 0;
3612 }
3613
3614 static int
3615 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3616     const struct ieee80211_bpf_params *params)
3617 {
3618         struct ieee80211com *ic = ni->ni_ic;
3619         struct iwm_softc *sc = ic->ic_softc;
3620         int error = 0;
3621
3622         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3623             "->%s begin\n", __func__);
3624
3625         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3626                 m_freem(m);
3627                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3628                     "<-%s not RUNNING\n", __func__);
3629                 return (ENETDOWN);
3630         }
3631
3632         IWM_LOCK(sc);
3633         /* XXX fix this */
3634         if (params == NULL) {
3635                 error = iwm_tx(sc, m, ni, 0);
3636         } else {
3637                 error = iwm_tx(sc, m, ni, 0);
3638         }
3639         sc->sc_tx_timer = 5;
3640         IWM_UNLOCK(sc);
3641
3642         return (error);
3643 }
3644
3645 /*
3646  * mvm/tx.c
3647  */
3648
3649 #if 0
3650 /*
3651  * Note that there are transports that buffer frames before they reach
3652  * the firmware. This means that after flush_tx_path is called, the
3653  * queue might not be empty. The race-free way to handle this is to:
3654  * 1) set the station as draining
3655  * 2) flush the Tx path
3656  * 3) wait for the transport queues to be empty
3657  */
3658 int
3659 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3660 {
3661         struct iwm_tx_path_flush_cmd flush_cmd = {
3662                 .queues_ctl = htole32(tfd_msk),
3663                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3664         };
3665         int ret;
3666
3667         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3668             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3669             sizeof(flush_cmd), &flush_cmd);
3670         if (ret)
3671                 device_printf(sc->sc_dev,
3672                     "Flushing tx queue failed: %d\n", ret);
3673         return ret;
3674 }
3675 #endif
3676
3677 static int
3678 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3679         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3680 {
3681         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3682             cmd, status);
3683 }
3684
3685 /* send station add/update command to firmware */
3686 static int
3687 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3688 {
3689         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3690         int ret;
3691         uint32_t status;
3692
3693         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3694
3695         add_sta_cmd.sta_id = IWM_STATION_ID;
3696         add_sta_cmd.mac_id_n_color
3697             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3698                 IWM_DEFAULT_COLOR));
3699         if (!update) {
3700                 int ac;
3701                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3702                         add_sta_cmd.tfd_queue_msk |=
3703                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3704                 }
3705                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3706         }
3707         add_sta_cmd.add_modify = update ? 1 : 0;
3708         add_sta_cmd.station_flags_msk
3709             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3710         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3711         if (update)
3712                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3713
3714         status = IWM_ADD_STA_SUCCESS;
3715         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3716         if (ret)
3717                 return ret;
3718
3719         switch (status) {
3720         case IWM_ADD_STA_SUCCESS:
3721                 break;
3722         default:
3723                 ret = EIO;
3724                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3725                 break;
3726         }
3727
3728         return ret;
3729 }
3730
3731 static int
3732 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3733 {
3734         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3735 }
3736
3737 static int
3738 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3739 {
3740         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3741 }
3742
3743 static int
3744 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3745         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3746 {
3747         struct iwm_mvm_add_sta_cmd_v7 cmd;
3748         int ret;
3749         uint32_t status;
3750
3751         memset(&cmd, 0, sizeof(cmd));
3752         cmd.sta_id = sta->sta_id;
3753         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3754
3755         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3756         cmd.tid_disable_tx = htole16(0xffff);
3757
3758         if (addr)
3759                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3760
3761         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3762         if (ret)
3763                 return ret;
3764
3765         switch (status) {
3766         case IWM_ADD_STA_SUCCESS:
3767                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3768                     "%s: Internal station added.\n", __func__);
3769                 return 0;
3770         default:
3771                 device_printf(sc->sc_dev,
3772                     "%s: Add internal station failed, status=0x%x\n",
3773                     __func__, status);
3774                 ret = EIO;
3775                 break;
3776         }
3777         return ret;
3778 }
3779
3780 static int
3781 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3782 {
3783         int ret;
3784
3785         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3786         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3787
3788         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3789         if (ret)
3790                 return ret;
3791
3792         ret = iwm_mvm_add_int_sta_common(sc,
3793             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3794
3795         if (ret)
3796                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3797         return ret;
3798 }
3799
3800 static int
3801 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3802 {
3803         struct iwm_time_quota_cmd cmd;
3804         int i, idx, ret, num_active_macs, quota, quota_rem;
3805         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3806         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3807         uint16_t id;
3808
3809         memset(&cmd, 0, sizeof(cmd));
3810
3811         /* currently, PHY ID == binding ID */
3812         if (in) {
3813                 id = in->in_phyctxt->id;
3814                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3815                 colors[id] = in->in_phyctxt->color;
3816
3817                 n_ifs[id] = 1;
3819         }
3820
3821         /*
3822          * The FW's scheduling session consists of
3823          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3824          * equally between all the bindings that require quota
3825          */
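        /*
         * Illustrative numbers, assuming IWM_MVM_MAX_QUOTA were 128:
         * with three active MACs each binding gets 128 / 3 = 42 and the
         * remainder of 2 is added to the first binding below.
         */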
3826         num_active_macs = 0;
3827         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3828                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3829                 num_active_macs += n_ifs[i];
3830         }
3831
3832         quota = 0;
3833         quota_rem = 0;
3834         if (num_active_macs) {
3835                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3836                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3837         }
3838
3839         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3840                 if (colors[i] < 0)
3841                         continue;
3842
3843                 cmd.quotas[idx].id_and_color =
3844                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3845
3846                 if (n_ifs[i] <= 0) {
3847                         cmd.quotas[idx].quota = htole32(0);
3848                         cmd.quotas[idx].max_duration = htole32(0);
3849                 } else {
3850                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3851                         cmd.quotas[idx].max_duration = htole32(0);
3852                 }
3853                 idx++;
3854         }
3855
3856         /* Give the remainder of the session to the first binding */
3857         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3858
3859         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3860             sizeof(cmd), &cmd);
3861         if (ret)
3862                 device_printf(sc->sc_dev,
3863                     "%s: Failed to send quota: %d\n", __func__, ret);
3864         return ret;
3865 }
3866
3867 /*
3868  * ieee80211 routines
3869  */
3870
3871 /*
3872  * Change to AUTH state in 80211 state machine.  Roughly matches what
3873  * Linux does in bss_info_changed().
3874  */
3875 static int
3876 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3877 {
3878         struct ieee80211_node *ni;
3879         struct iwm_node *in;
3880         struct iwm_vap *iv = IWM_VAP(vap);
3881         uint32_t duration;
3882         int error;
3883
3884         /*
3885          * XXX I have a feeling that the vap node is being
3886          * freed from underneath us. Grr.
3887          */
3888         ni = ieee80211_ref_node(vap->iv_bss);
3889         in = IWM_NODE(ni);
3890         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3891             "%s: called; vap=%p, bss ni=%p\n",
3892             __func__,
3893             vap,
3894             ni);
3895
3896         in->in_assoc = 0;
3897
3898         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3899         if (error != 0)
3900                 return error;
3901
3902         error = iwm_allow_mcast(vap, sc);
3903         if (error) {
3904                 device_printf(sc->sc_dev,
3905                     "%s: failed to set multicast\n", __func__);
3906                 goto out;
3907         }
3908
3909         /*
3910          * This is where it deviates from what Linux does.
3911          *
3912          * Linux iwlwifi doesn't reset the nic each time, nor does it
3913          * call ctxt_add() here.  Instead, it adds it during vap creation,
3914          * and always does a mac_ctx_changed().
3915          *
3916          * The openbsd port doesn't attempt to do that - it reset things
3917          * at odd states and does the add here.
3918          *
3919          * So, until the state handling is fixed (ie, we never reset
3920          * the NIC except for a firmware failure, which should drag
3921          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3922          * contexts that are required), let's do a dirty hack here.
3923          */
3924         if (iv->is_uploaded) {
3925                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3926                         device_printf(sc->sc_dev,
3927                             "%s: failed to update MAC\n", __func__);
3928                         goto out;
3929                 }
3930                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3931                     in->in_ni.ni_chan, 1, 1)) != 0) {
3932                         device_printf(sc->sc_dev,
3933                             "%s: failed update phy ctxt\n", __func__);
3934                         goto out;
3935                 }
3936                 in->in_phyctxt = &sc->sc_phyctxt[0];
3937
3938                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3939                         device_printf(sc->sc_dev,
3940                             "%s: binding update cmd\n", __func__);
3941                         goto out;
3942                 }
3943                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3944                         device_printf(sc->sc_dev,
3945                             "%s: failed to update sta\n", __func__);
3946                         goto out;
3947                 }
3948         } else {
3949                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3950                         device_printf(sc->sc_dev,
3951                             "%s: failed to add MAC\n", __func__);
3952                         goto out;
3953                 }
3954                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3955                     in->in_ni.ni_chan, 1, 1)) != 0) {
3956                         device_printf(sc->sc_dev,
3957                             "%s: failed add phy ctxt!\n", __func__);
3958                         error = ETIMEDOUT;
3959                         goto out;
3960                 }
3961                 in->in_phyctxt = &sc->sc_phyctxt[0];
3962
3963                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3964                         device_printf(sc->sc_dev,
3965                             "%s: binding add cmd\n", __func__);
3966                         goto out;
3967                 }
3968                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3969                         device_printf(sc->sc_dev,
3970                             "%s: failed to add sta\n", __func__);
3971                         goto out;
3972                 }
3973         }
3974
3975         /*
3976          * Prevent the FW from wandering off channel during association
3977          * by "protecting" the session with a time event.
3978          */
3979         /* XXX duration is in units of TU, not MS */
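        /*
         * Note: a TU is 1024 microseconds, so a value computed in ms
         * runs about 2.4% longer than intended.
         */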
3980         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3981         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3982         DELAY(100);
3983
3984         error = 0;
3985 out:
3986         ieee80211_free_node(ni);
3987         return (error);
3988 }
3989
3990 static int
3991 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3992 {
3993         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3994         int error;
3995
3996         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3997                 device_printf(sc->sc_dev,
3998                     "%s: failed to update STA\n", __func__);
3999                 return error;
4000         }
4001
4002         in->in_assoc = 1;
4003         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4004                 device_printf(sc->sc_dev,
4005                     "%s: failed to update MAC\n", __func__);
4006                 return error;
4007         }
4008
4009         return 0;
4010 }
4011
4012 static int
4013 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4014 {
4015