1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109 * changes to remove per-device network interface (DragonFly has not
110 * caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * header file paths (DFly allows localized path specifications).
123 * minor header file differences.
125 * Comprehensive list of adjustments for DragonFly #ifdef'd:
126 * (safety) added register read-back serialization in iwm_reset_rx_ring().
129 * mtx -> lk (mtx functions -> lockmgr functions)
130 * callout differences
131 * taskqueue differences
133 * bus_setup_intr() differences
134 * minor PCI config register naming differences
136 #include <sys/cdefs.h>
137 __FBSDID("$FreeBSD$");
139 #include <sys/param.h>
141 #include <sys/endian.h>
142 #include <sys/firmware.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/mbuf.h>
146 #include <sys/module.h>
147 #include <sys/rman.h>
148 #include <sys/sysctl.h>
149 #include <sys/linker.h>
151 #include <machine/endian.h>
153 #include <bus/pci/pcivar.h>
154 #include <bus/pci/pcireg.h>
159 #include <net/if_var.h>
160 #include <net/if_arp.h>
161 #include <net/if_dl.h>
162 #include <net/if_media.h>
163 #include <net/if_types.h>
165 #include <netinet/in.h>
166 #include <netinet/in_systm.h>
167 #include <netinet/if_ether.h>
168 #include <netinet/ip.h>
170 #include <netproto/802_11/ieee80211_var.h>
171 #include <netproto/802_11/ieee80211_regdomain.h>
172 #include <netproto/802_11/ieee80211_ratectl.h>
173 #include <netproto/802_11/ieee80211_radiotap.h>
175 #include "if_iwmreg.h"
176 #include "if_iwmvar.h"
177 #include "if_iwm_config.h"
178 #include "if_iwm_debug.h"
179 #include "if_iwm_notif_wait.h"
180 #include "if_iwm_util.h"
181 #include "if_iwm_binding.h"
182 #include "if_iwm_phy_db.h"
183 #include "if_iwm_mac_ctxt.h"
184 #include "if_iwm_phy_ctxt.h"
185 #include "if_iwm_time_event.h"
186 #include "if_iwm_power.h"
187 #include "if_iwm_scan.h"
188 #include "if_iwm_pcie_trans.h"
189 #include "if_iwm_led.h"
190 #include "if_iwm_fw.h"
/*
 * Channel numbers that can appear in the NVM of 7000-family devices.
 * The compile-time assert guarantees the table fits in the per-device
 * channel-map storage (IWM_NUM_CHANNELS).
 */
192 const uint8_t iwm_nvm_channels[] = {
/* 2 GHz channels (1-14) */
194 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz channels */
196 	36, 40, 44, 48, 52, 56, 60, 64,
197 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
198 	149, 153, 157, 161, 165
200 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
201     "IWM_NUM_CHANNELS is too small");
/*
 * Channel numbers that can appear in the NVM of 8000-family devices;
 * wider 5 GHz coverage than the 7000-family table above.
 */
203 const uint8_t iwm_nvm_channels_8000[] = {
/* 2 GHz channels (1-14) */
205 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz channels */
207 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
208 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
209 	149, 153, 157, 161, 165, 169, 173, 177, 181
211 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
212     "IWM_NUM_CHANNELS_8000 is too small");
214 #define IWM_NUM_2GHZ_CHANNELS 14
215 #define IWM_N_HW_ADDR_MASK 0xF
218 * XXX For now, there's simply a fixed set of rate table entries
219 * that are populated.
/*
 * Fixed rate table: each entry pairs an 802.11 rate (in units of
 * 500 kbit/s, so 2 == 1 Mbit/s) with the corresponding firmware PLCP
 * code.  First four entries are CCK, the rest OFDM (see IWM_RIDX_*
 * macros following this table).
 */
221 const struct iwm_rate {
/* CCK rates */
225 	{   2, IWM_RATE_1M_PLCP  },
226 	{   4, IWM_RATE_2M_PLCP  },
227 	{  11, IWM_RATE_5M_PLCP  },
228 	{  22, IWM_RATE_11M_PLCP },
/* OFDM rates */
229 	{  12, IWM_RATE_6M_PLCP  },
230 	{  18, IWM_RATE_9M_PLCP  },
231 	{  24, IWM_RATE_12M_PLCP },
232 	{  36, IWM_RATE_18M_PLCP },
233 	{  48, IWM_RATE_24M_PLCP },
234 	{  72, IWM_RATE_36M_PLCP },
235 	{  96, IWM_RATE_48M_PLCP },
236 	{ 108, IWM_RATE_54M_PLCP },
238 #define IWM_RIDX_CCK 0
239 #define IWM_RIDX_OFDM 4
240 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
241 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
242 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
244 struct iwm_nvm_section {
249 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz
250 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz)
252 struct iwm_mvm_alive_data {
254 uint32_t scd_base_addr;
257 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
258 static int iwm_firmware_store_section(struct iwm_softc *,
260 const uint8_t *, size_t);
261 static int iwm_set_default_calib(struct iwm_softc *, const void *);
262 static void iwm_fw_info_free(struct iwm_fw_info *);
263 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
264 #if !defined(__DragonFly__)
265 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
267 static int iwm_alloc_fwmem(struct iwm_softc *);
268 static int iwm_alloc_sched(struct iwm_softc *);
269 static int iwm_alloc_kw(struct iwm_softc *);
270 static int iwm_alloc_ict(struct iwm_softc *);
271 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
272 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
276 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
278 static void iwm_enable_interrupts(struct iwm_softc *);
279 static void iwm_restore_interrupts(struct iwm_softc *);
280 static void iwm_disable_interrupts(struct iwm_softc *);
281 static void iwm_ict_reset(struct iwm_softc *);
282 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
283 static void iwm_stop_device(struct iwm_softc *);
284 static void iwm_mvm_nic_config(struct iwm_softc *);
285 static int iwm_nic_rx_init(struct iwm_softc *);
286 static int iwm_nic_tx_init(struct iwm_softc *);
287 static int iwm_nic_init(struct iwm_softc *);
288 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
289 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
290 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
291 uint16_t, uint8_t *, uint16_t *);
292 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
293 uint16_t *, uint32_t);
294 static uint32_t iwm_eeprom_channel_flags(uint16_t);
295 static void iwm_add_channel_band(struct iwm_softc *,
296 struct ieee80211_channel[], int, int *, int, size_t,
298 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
299 struct ieee80211_channel[]);
300 static struct iwm_nvm_data *
301 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
302 const uint16_t *, const uint16_t *,
303 const uint16_t *, const uint16_t *,
305 static void iwm_free_nvm_data(struct iwm_nvm_data *);
306 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
307 struct iwm_nvm_data *,
310 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
312 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
313 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
315 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
317 static void iwm_set_radio_cfg(const struct iwm_softc *,
318 struct iwm_nvm_data *, uint32_t);
319 static struct iwm_nvm_data *
320 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
321 static int iwm_nvm_init(struct iwm_softc *);
322 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
323 const struct iwm_fw_desc *);
324 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
325 bus_addr_t, uint32_t);
326 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
327 const struct iwm_fw_sects *,
329 static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
330 const struct iwm_fw_sects *,
332 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
333 const struct iwm_fw_sects *);
334 static int iwm_pcie_load_given_ucode(struct iwm_softc *,
335 const struct iwm_fw_sects *);
336 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
337 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
338 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
339 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
340 enum iwm_ucode_type);
341 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
342 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
343 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
344 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
345 struct iwm_rx_phy_info *);
346 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
347 struct iwm_rx_packet *);
348 static int iwm_get_noise(struct iwm_softc *sc,
349 const struct iwm_mvm_statistics_rx_non_phy *);
350 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
351 struct iwm_rx_data *);
352 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
353 struct iwm_rx_packet *,
355 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
356 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
358 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
361 static const struct iwm_rate *
362 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
363 struct ieee80211_frame *, struct iwm_tx_cmd *);
364 static int iwm_tx(struct iwm_softc *, struct mbuf *,
365 struct ieee80211_node *, int);
366 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
367 const struct ieee80211_bpf_params *);
368 static int iwm_mvm_flush_tx_path(struct iwm_softc *sc,
369 uint32_t tfd_msk, uint32_t flags);
370 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
371 struct iwm_mvm_add_sta_cmd_v7 *,
373 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
375 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
376 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
377 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
378 struct iwm_int_sta *,
379 const uint8_t *, uint16_t, uint16_t);
380 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
381 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
382 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
383 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
384 static int iwm_release(struct iwm_softc *, struct iwm_node *);
385 static struct ieee80211_node *
386 iwm_node_alloc(struct ieee80211vap *,
387 const uint8_t[IEEE80211_ADDR_LEN]);
388 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
389 static int iwm_media_change(struct ifnet *);
390 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
391 static void iwm_endscan_cb(void *, int);
392 static void iwm_mvm_fill_sf_command(struct iwm_softc *,
393 struct iwm_sf_cfg_cmd *,
394 struct ieee80211_node *);
395 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
396 static int iwm_send_bt_init_conf(struct iwm_softc *);
397 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
398 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
399 static int iwm_init_hw(struct iwm_softc *);
400 static void iwm_init(struct iwm_softc *);
401 static void iwm_start(struct iwm_softc *);
402 static void iwm_stop(struct iwm_softc *);
403 static void iwm_watchdog(void *);
404 static void iwm_parent(struct ieee80211com *);
407 iwm_desc_lookup(uint32_t);
408 static void iwm_nic_error(struct iwm_softc *);
409 static void iwm_nic_umac_error(struct iwm_softc *);
411 static void iwm_notif_intr(struct iwm_softc *);
412 static void iwm_intr(void *);
413 static int iwm_attach(device_t);
414 static int iwm_is_valid_ether_addr(uint8_t *);
415 static void iwm_preinit(void *);
416 static int iwm_detach_local(struct iwm_softc *sc, int);
417 static void iwm_init_task(void *);
418 static void iwm_radiotap_attach(struct iwm_softc *);
419 static struct ieee80211vap *
420 iwm_vap_create(struct ieee80211com *,
421 const char [IFNAMSIZ], int,
422 enum ieee80211_opmode, int,
423 const uint8_t [IEEE80211_ADDR_LEN],
424 const uint8_t [IEEE80211_ADDR_LEN]);
425 static void iwm_vap_delete(struct ieee80211vap *);
426 static void iwm_scan_start(struct ieee80211com *);
427 static void iwm_scan_end(struct ieee80211com *);
428 static void iwm_update_mcast(struct ieee80211com *);
429 static void iwm_set_channel(struct ieee80211com *);
430 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
431 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
432 static int iwm_detach(device_t);
434 #if defined(__DragonFly__)
435 static int iwm_msi_enable = 1;
437 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
/*
 * Validate the firmware's crypto-scheme TLV: 'dlen' must cover the
 * list header plus all advertised entries.  Nothing is retained --
 * the driver always falls back to software crypto.
 */
446 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
448 	const struct iwm_fw_cscheme_list *l = (const void *)data;
	/* reject TLVs too short for the header or the declared entries */
450 	if (dlen < sizeof(*l) ||
451 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
454 	/* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware section (from a SEC_RT/SEC_INIT/SEC_WOWLAN TLV)
 * into sc->sc_fw.fw_sects[type].  The first 32 bits of the TLV payload
 * are the device load offset; the remainder is the section image.
 * The stored 'data' pointer aliases the firmware image, so the image
 * must stay resident while the sections are in use.
 */
460 iwm_firmware_store_section(struct iwm_softc *sc,
461     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
463 	struct iwm_fw_sects *fws;
464 	struct iwm_fw_desc *fwone;
	/* bounds-check the ucode image type and minimum TLV size */
466 	if (type >= IWM_UCODE_TYPE_MAX)
468 	if (dlen < sizeof(uint32_t))
471 	fws = &sc->sc_fw.fw_sects[type];
	/* refuse to overflow the fixed-size section array */
472 	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
475 	fwone = &fws->fw_sect[fws->fw_count];
477 	/* first 32bit are device load offset */
478 	memcpy(&fwone->offset, data, sizeof(uint32_t));
481 	fwone->data = data + sizeof(uint32_t);
482 	fwone->len = dlen - sizeof(uint32_t);
489 #define IWM_DEFAULT_SCAN_CHANNELS 40
491 struct iwm_tlv_calib_data {
493 struct iwm_tlv_calib_ctrl calib;
/*
 * Parse an IWM_UCODE_TLV_DEF_CALIB payload: store the default
 * calibration flow/event trigger masks for the ucode type it names.
 * Rejects out-of-range ucode types with a diagnostic.
 */
497 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
499 	const struct iwm_tlv_calib_data *def_calib = data;
500 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
502 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
503 		device_printf(sc->sc_dev,
504 		    "Wrong ucode_type %u for default "
505 		    "calibration.\n", ucode_type);
	/* note: trigger masks are stored as-is (firmware byte order) */
509 	sc->sc_default_calib[ucode_type].flow_trigger =
510 	    def_calib->calib.flow_trigger;
511 	sc->sc_default_calib[ucode_type].event_trigger =
512 	    def_calib->calib.event_trigger;
/*
 * Release the firmware image and forget all parsed sections.
 * fw->fw_status is deliberately left alone so the load/parse state
 * machine in iwm_read_firmware() stays consistent.
 */
518 iwm_fw_info_free(struct iwm_fw_info *fw)
520 	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
522 	/* don't touch fw->fw_status */
523 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image named by sc->cfg->fw_name via firmware_get()
 * and parse its TLV stream into sc->sc_fw: code sections, capability
 * flags, calibration defaults, PHY SKU, paging parameters, etc.
 * Serializes concurrent callers via fw->fw_status (NONE -> INPROGRESS
 * -> DONE) and sleeps while another thread is already loading.
 * NOTE(review): this view of the function is missing lines (error
 * returns, wakeups, and some cleanup); comments describe only the
 * visible logic.
 */
527 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
529 	struct iwm_fw_info *fw = &sc->sc_fw;
530 	const struct iwm_tlv_ucode_header *uhdr;
531 	struct iwm_ucode_tlv tlv;
532 	enum iwm_ucode_tlv_type tlv_type;
533 	const struct firmware *fwp;
535 	uint32_t usniffer_img;
536 	uint32_t paging_mem_size;
	/* already parsed once and caller isn't asking for INIT ucode */
541 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
542 	    ucode_type != IWM_UCODE_INIT)
	/* wait for a concurrent loader to finish */
545 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
546 #if defined(__DragonFly__)
547 		lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
549 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
552 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
554 	if (fw->fw_fp != NULL)
555 		iwm_fw_info_free(fw);
558 	 * Load firmware into driver memory.
562 	fwp = firmware_get(sc->cfg->fw_name);
565 		device_printf(sc->sc_dev,
566 		    "could not read firmware %s (error %d)\n",
567 		    sc->cfg->fw_name, error);
572 	/* (Re-)Initialize default values. */
573 	sc->sc_capaflags = 0;
574 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
575 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
576 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
579 	 * Parse firmware contents
	/* image must start with a zero word followed by the TLV magic */
582 	uhdr = (const void *)fw->fw_fp->data;
583 	if (*(const uint32_t *)fw->fw_fp->data != 0
584 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
585 		device_printf(sc->sc_dev, "invalid firmware %s\n",
591 	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
592 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
593 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
594 	    IWM_UCODE_API(le32toh(uhdr->ver)));
596 	len = fw->fw_fp->datasize - sizeof(*uhdr);
	/* walk the TLV stream; each record is 4-byte padded */
598 	while (len >= sizeof(tlv)) {
600 		const void *tlv_data;
602 		memcpy(&tlv, data, sizeof(tlv));
603 		tlv_len = le32toh(tlv.length);
604 		tlv_type = le32toh(tlv.type);
611 			device_printf(sc->sc_dev,
612 			    "firmware too short: %zu bytes\n",
618 		switch ((int)tlv_type) {
619 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
620 			if (tlv_len < sizeof(uint32_t)) {
621 				device_printf(sc->sc_dev,
622 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
628 			sc->sc_capa_max_probe_len
629 			    = le32toh(*(const uint32_t *)tlv_data);
630 			/* limit it to something sensible */
631 			if (sc->sc_capa_max_probe_len >
632 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
633 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
634 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
635 				    "ridiculous\n", __func__);
640 		case IWM_UCODE_TLV_PAN:
642 				device_printf(sc->sc_dev,
643 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
649 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
651 		case IWM_UCODE_TLV_FLAGS:
652 			if (tlv_len < sizeof(uint32_t)) {
653 				device_printf(sc->sc_dev,
654 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
661 			 * Apparently there can be many flags, but Linux driver
662 			 * parses only the first one, and so do we.
664 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
665 			 * Intentional or a bug? Observations from
666 			 * current firmware file:
667 			 * 1) TLV_PAN is parsed first
668 			 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
669 			 * ==> this resets TLV_PAN to itself... hnnnk
671 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
673 		case IWM_UCODE_TLV_CSCHEME:
674 			if ((error = iwm_store_cscheme(sc,
675 			    tlv_data, tlv_len)) != 0) {
676 				device_printf(sc->sc_dev,
677 				    "%s: iwm_store_cscheme(): returned %d\n",
683 		case IWM_UCODE_TLV_NUM_OF_CPU:
684 			if (tlv_len != sizeof(uint32_t)) {
685 				device_printf(sc->sc_dev,
686 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
692 			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
			/* dual-CPU firmware marks all three image types */
693 			if (num_of_cpus == 2) {
694 				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
696 				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
698 				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
700 			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
701 				device_printf(sc->sc_dev,
702 				    "%s: Driver supports only 1 or 2 CPUs\n",
708 		case IWM_UCODE_TLV_SEC_RT:
709 			if ((error = iwm_firmware_store_section(sc,
710 			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
711 				device_printf(sc->sc_dev,
712 				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
718 		case IWM_UCODE_TLV_SEC_INIT:
719 			if ((error = iwm_firmware_store_section(sc,
720 			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
721 				device_printf(sc->sc_dev,
722 				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
728 		case IWM_UCODE_TLV_SEC_WOWLAN:
729 			if ((error = iwm_firmware_store_section(sc,
730 			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
731 				device_printf(sc->sc_dev,
732 				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
738 		case IWM_UCODE_TLV_DEF_CALIB:
739 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
740 				device_printf(sc->sc_dev,
741 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
744 				    (int) sizeof(struct iwm_tlv_calib_data));
748 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
749 				device_printf(sc->sc_dev,
750 				    "%s: iwm_set_default_calib() failed: %d\n",
756 		case IWM_UCODE_TLV_PHY_SKU:
757 			if (tlv_len != sizeof(uint32_t)) {
759 				device_printf(sc->sc_dev,
760 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
			/* extract valid TX/RX antenna masks from the PHY config word */
765 			sc->sc_fw.phy_config =
766 			    le32toh(*(const uint32_t *)tlv_data);
767 			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
768 			    IWM_FW_PHY_CFG_TX_CHAIN) >>
769 			    IWM_FW_PHY_CFG_TX_CHAIN_POS;
770 			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
771 			    IWM_FW_PHY_CFG_RX_CHAIN) >>
772 			    IWM_FW_PHY_CFG_RX_CHAIN_POS;
775 		case IWM_UCODE_TLV_API_CHANGES_SET: {
776 			const struct iwm_ucode_api *api;
777 			if (tlv_len != sizeof(*api)) {
781 			api = (const struct iwm_ucode_api *)tlv_data;
782 			/* Flags may exceed 32 bits in future firmware. */
783 			if (le32toh(api->api_index) > 0) {
784 				device_printf(sc->sc_dev,
785 				    "unsupported API index %d\n",
786 				    le32toh(api->api_index));
789 			sc->sc_ucode_api = le32toh(api->api_flags);
793 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
794 			const struct iwm_ucode_capa *capa;
796 			if (tlv_len != sizeof(*capa)) {
800 			capa = (const struct iwm_ucode_capa *)tlv_data;
801 			idx = le32toh(capa->api_index);
802 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
803 				device_printf(sc->sc_dev,
804 				    "unsupported API index %d\n", idx);
			/* each capa TLV carries one 32-bit word of the capability bitmap */
807 			for (i = 0; i < 32; i++) {
808 				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
810 				setbit(sc->sc_enabled_capa, i + (32 * idx));
815 		case 48: /* undocumented TLV */
816 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
817 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
818 			/* ignore, not used by current driver */
821 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
822 			if ((error = iwm_firmware_store_section(sc,
823 			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
828 		case IWM_UCODE_TLV_PAGING:
829 			if (tlv_len != sizeof(uint32_t)) {
833 			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
835 			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
836 			    "%s: Paging: paging enabled (size = %u bytes)\n",
837 			    __func__, paging_mem_size);
838 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
839 				device_printf(sc->sc_dev,
840 				    "%s: Paging: driver supports up to %u bytes for paging image\n",
841 				    __func__, IWM_MAX_PAGING_IMAGE_SIZE);
			/* paging image size must be IWM_FW_PAGING_SIZE-aligned */
845 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
846 				device_printf(sc->sc_dev,
847 				    "%s: Paging: image isn't multiple %u\n",
848 				    __func__, IWM_FW_PAGING_SIZE);
853 			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
855 			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
856 			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
860 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
861 			if (tlv_len != sizeof(uint32_t)) {
865 			sc->sc_capa_n_scan_channels =
866 			    le32toh(*(const uint32_t *)tlv_data);
869 		case IWM_UCODE_TLV_FW_VERSION:
870 			if (tlv_len != sizeof(uint32_t) * 3) {
874 			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
876 			    le32toh(((const uint32_t *)tlv_data)[0]),
877 			    le32toh(((const uint32_t *)tlv_data)[1]),
878 			    le32toh(((const uint32_t *)tlv_data)[2]));
881 		case IWM_UCODE_TLV_FW_MEM_SEG:
885 			device_printf(sc->sc_dev,
886 			    "%s: unknown firmware section %d, abort\n",
		/* advance to the next 4-byte-aligned TLV record */
892 		len -= roundup(tlv_len, 4);
893 		data += roundup(tlv_len, 4);
896 	KASSERT(error == 0, ("unhandled error"));
900 		device_printf(sc->sc_dev, "firmware parse error %d, "
901 		    "section type %d\n", error, tlv_type);
904 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
905 		device_printf(sc->sc_dev,
906 		    "device uses unsupported power ops\n");
	/* on failure drop the image and reset state; on success mark DONE */
912 		fw->fw_status = IWM_FW_STATUS_NONE;
913 		if (fw->fw_fp != NULL)
914 			iwm_fw_info_free(fw);
916 		fw->fw_status = IWM_FW_STATUS_DONE;
923 * DMA resource routines
926 /* fwmem is used to load firmware onto the card */
/*
 * Allocate the contiguous DMA region used to stage firmware chunks
 * for upload to the card; returns the iwm_dma_contig_alloc() status.
 */
928 iwm_alloc_fwmem(struct iwm_softc *sc)
930 	/* Must be aligned on a 16-byte boundary. */
931 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
932 	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
935 /* tx scheduler rings. not used? */
/*
 * Allocate the TX scheduler byte-count tables, one per TX queue;
 * returns the iwm_dma_contig_alloc() status.
 */
937 iwm_alloc_sched(struct iwm_softc *sc)
939 	/* TX scheduler rings must be aligned on a 1KB boundary. */
940 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
941 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
944 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/*
 * Allocate the 4 KiB "keep warm" page the card uses internally
 * (see iwl-fh.h in the Linux driver for details).
 */
946 iwm_alloc_kw(struct iwm_softc *sc)
948 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
951 /* interrupt cause table */
/*
 * Allocate the interrupt cause table (ICT), aligned so its physical
 * address fits the card's paddr-shift addressing scheme.
 */
953 iwm_alloc_ict(struct iwm_softc *sc)
955 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
956 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
/*
 * Set up one RX ring: descriptor array, shared status area, a DMA tag
 * for receive buffers, a spare map for iwm_rx_addbuf(), and an initial
 * mbuf for every ring slot.  On any failure the partially-built ring
 * is torn down via iwm_free_rx_ring() at the 'fail' label.
 */
960 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
967 	/* Allocate RX descriptors (256-byte aligned). */
968 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
969 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
971 		device_printf(sc->sc_dev,
972 		    "could not allocate RX ring DMA memory\n");
975 	ring->desc = ring->desc_dma.vaddr;
977 	/* Allocate RX status area (16-byte aligned). */
978 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
979 	    sizeof(*ring->stat), 16);
981 		device_printf(sc->sc_dev,
982 		    "could not allocate RX status DMA memory\n");
985 	ring->stat = ring->stat_dma.vaddr;
987 	/* Create RX buffer DMA tag. */
988 #if defined(__DragonFly__)
989 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
991 	    BUS_SPACE_MAXADDR_32BIT,
994 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
995 	    BUS_DMA_NOWAIT, &ring->data_dmat);
997 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
998 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
999 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1002 		device_printf(sc->sc_dev,
1003 		    "%s: could not create RX buf DMA tag, error %d\n",
1008 	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1009 	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1011 		device_printf(sc->sc_dev,
1012 		    "%s: could not create RX buf DMA map, error %d\n",
1017 	 * Allocate and map RX buffers.
1019 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1020 		struct iwm_rx_data *data = &ring->data[i];
1021 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1023 			device_printf(sc->sc_dev,
1024 			    "%s: could not create RX buf DMA map, error %d\n",
1030 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1036 fail:	iwm_free_rx_ring(sc, ring);
/*
 * Reset RX ring software state and clear the shared status area so the
 * hardware's write pointer and the driver's read pointer re-converge.
 * NOTE(review): clears sc->rxq.stat rather than ring->stat -- only
 * equivalent when called on sc's own RX ring; confirm against callers.
 */
1041 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1043 	/* Reset the ring state */
1047 	 * The hw rx ring index in shared memory must also be cleared,
1048 	 * otherwise the discrepancy can cause reprocessing chaos.
1050 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Tear down one RX ring: free the descriptor and status DMA areas,
 * then sync/unload/destroy every per-slot buffer map, the spare map,
 * and finally the buffer DMA tag.  Safe to call on a partially
 * constructed ring (NULL checks guard each resource).
 */
1054 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1058 	iwm_dma_contig_free(&ring->desc_dma);
1059 	iwm_dma_contig_free(&ring->stat_dma);
1061 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1062 		struct iwm_rx_data *data = &ring->data[i];
1064 		if (data->m != NULL) {
1065 			bus_dmamap_sync(ring->data_dmat, data->map,
1066 			    BUS_DMASYNC_POSTREAD);
1067 			bus_dmamap_unload(ring->data_dmat, data->map);
1071 		if (data->map != NULL) {
1072 			bus_dmamap_destroy(ring->data_dmat, data->map);
1076 	if (ring->spare_map != NULL) {
1077 		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1078 		ring->spare_map = NULL;
1080 	if (ring->data_dmat != NULL) {
1081 		bus_dma_tag_destroy(ring->data_dmat);
1082 		ring->data_dmat = NULL;
/*
 * Set up one TX ring ('qid'): TFD descriptor array, a command buffer
 * area (only for rings up to the command queue), a buffer DMA tag
 * sized for either firmware commands or scatter/gather packet data,
 * and per-slot DMA maps with precomputed command/scratch physical
 * addresses.  On failure the ring is torn down at the 'fail' label.
 */
1087 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1099 	/* Allocate TX descriptors (256-byte aligned). */
1100 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1101 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1103 		device_printf(sc->sc_dev,
1104 		    "could not allocate TX ring DMA memory\n");
1107 	ring->desc = ring->desc_dma.vaddr;
1110 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1111 	 * to allocate commands space for other rings.
1113 	if (qid > IWM_MVM_CMD_QUEUE)
1116 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1117 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1119 		device_printf(sc->sc_dev,
1120 		    "could not allocate TX cmd DMA memory\n");
1123 	ring->cmd = ring->cmd_dma.vaddr;
1125 	/* FW commands may require more mapped space than packets. */
1126 	if (qid == IWM_MVM_CMD_QUEUE) {
1127 		maxsize = IWM_RBUF_SIZE;
1131 		nsegments = IWM_MAX_SCATTER - 2;
1134 #if defined(__DragonFly__)
1135 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1137 	    BUS_SPACE_MAXADDR_32BIT,
1140 	    maxsize, nsegments, maxsize,
1141 	    BUS_DMA_NOWAIT, &ring->data_dmat);
1143 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1144 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1145 	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1148 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
	/* each slot's command/scratch physical addresses are laid out
	 * back-to-back inside the command DMA area */
1152 	paddr = ring->cmd_dma.paddr;
1153 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1154 		struct iwm_tx_data *data = &ring->data[i];
1156 		data->cmd_paddr = paddr;
1157 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1158 		    + offsetof(struct iwm_tx_cmd, scratch);
1159 		paddr += sizeof(struct iwm_device_cmd);
1161 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1163 			device_printf(sc->sc_dev,
1164 			    "could not create TX buf DMA map\n");
1168 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1169 	    ("invalid physical address"));
1172 fail:	iwm_free_tx_ring(sc, ring);
/*
 * Return a TX ring to its empty state: unload any mbufs still
 * mapped, clear the descriptor array, and drop the ring's bit from
 * the queue-full mask.  Resetting the command queue also clears the
 * "command in flight" NIC-awake request if one is pending.
 */
1177 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1181 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1182 struct iwm_tx_data *data = &ring->data[i];
1184 if (data->m != NULL) {
1185 bus_dmamap_sync(ring->data_dmat, data->map,
1186 BUS_DMASYNC_POSTWRITE);
1187 bus_dmamap_unload(ring->data_dmat, data->map);
1192 /* Clear TX descriptors. */
1193 memset(ring->desc, 0, ring->desc_dma.size);
1194 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1195 BUS_DMASYNC_PREWRITE);
1196 sc->qfullmsk &= ~(1 << ring->qid);
1200 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1201 iwm_pcie_clear_cmd_in_flight(sc);
/*
 * Release all DMA resources of a TX ring: descriptor and command
 * DMA memory, every per-slot map (unloading mapped mbufs first),
 * and the buffer DMA tag.  Safe to call on a partially allocated
 * ring (all destroys are NULL-guarded).
 */
1205 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1209 iwm_dma_contig_free(&ring->desc_dma);
1210 iwm_dma_contig_free(&ring->cmd_dma);
1212 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1213 struct iwm_tx_data *data = &ring->data[i];
1215 if (data->m != NULL) {
1216 bus_dmamap_sync(ring->data_dmat, data->map,
1217 BUS_DMASYNC_POSTWRITE);
1218 bus_dmamap_unload(ring->data_dmat, data->map);
1222 if (data->map != NULL) {
1223 bus_dmamap_destroy(ring->data_dmat, data->map);
1227 if (ring->data_dmat != NULL) {
1228 bus_dma_tag_destroy(ring->data_dmat);
1229 ring->data_dmat = NULL;
1234 * High-level hardware frobbing routines
/* Enable the default interrupt set and remember it for later restore. */
1238 iwm_enable_interrupts(struct iwm_softc *sc)
1240 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1241 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-arm the interrupt mask last saved by iwm_enable_interrupts(). */
1245 iwm_restore_interrupts(struct iwm_softc *sc)
1247 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Mask all interrupts, then ACK anything already pending in both
 * the main and the FH interrupt status registers. */
1251 iwm_disable_interrupts(struct iwm_softc *sc)
1253 /* disable interrupts */
1254 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1256 /* acknowledge all interrupts */
1257 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1258 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * Reset the Interrupt Cause Table (ICT): clear the table memory,
 * point the hardware at its (4KB-aligned) physical address, switch
 * the driver into ICT mode, and re-enable interrupts.
 */
1262 iwm_ict_reset(struct iwm_softc *sc)
1264 iwm_disable_interrupts(sc);
1266 /* Reset ICT table. */
1267 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1270 /* Set physical address of ICT table (4KB aligned). */
1271 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1272 IWM_CSR_DRAM_INT_TBL_ENABLE
1273 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1274 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1275 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1277 /* Switch to ICT interrupt mode in driver. */
1278 sc->sc_flags |= IWM_FLAG_USE_ICT;
1280 /* Re-enable interrupts. */
1281 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1282 iwm_enable_interrupts(sc);
1286 * Since this .. hard-resets things, it's time to actually
1287 * mark the first vap (if any) as having no mac context.
1288 * It's annoying, but since the driver is potentially being
1289 * stop/start'ed whilst active (thanks openbsd port!) we
1290 * have to correctly track this.
/*
 * Full device stop: mask interrupts, halt TX/RX DMA, reset all
 * rings, power down busmaster clocks (7000 family), software-reset
 * the on-board processor and leave only the RF-kill interrupt armed.
 */
1293 iwm_stop_device(struct iwm_softc *sc)
1295 struct ieee80211com *ic = &sc->sc_ic;
1296 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1300 /* tell the device to stop sending interrupts */
1301 iwm_disable_interrupts(sc);
1304 * FreeBSD-local: mark the first vap as not-uploaded,
1305 * so the next transition through auth/assoc
1306 * will correctly populate the MAC context.
1309 struct iwm_vap *iv = IWM_VAP(vap);
1310 iv->is_uploaded = 0;
1313 /* device going down, Stop using ICT table */
1314 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1316 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1318 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1320 if (iwm_nic_lock(sc)) {
1321 /* Stop each Tx DMA channel */
1322 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1324 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1325 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1328 /* Wait for DMA channels to be idle */
1329 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1331 device_printf(sc->sc_dev,
1332 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1333 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1337 iwm_pcie_rx_stop(sc);
1340 iwm_reset_rx_ring(sc, &sc->rxq);
1342 /* Reset all TX rings. */
1343 for (qid = 0; qid < nitems(sc->txq); qid++)
1344 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1346 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1347 /* Power-down device's busmaster DMA clocks */
1348 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1349 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1353 /* Make sure (redundant) we've released our request to stay awake */
1354 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1355 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1357 /* Stop the device, and put it in low power state */
1360 /* stop and reset the on-board processor */
1361 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1365 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1366 * This is a bug in certain versions of the hardware.
1367 * Certain devices also keep sending HW RF kill interrupt all
1368 * the time, unless the interrupt is ACKed even if the interrupt
1369 * should be masked. Re-ACK all the interrupts here.
1371 iwm_disable_interrupts(sc);
1374 * Even if we stop the HW, we still want the RF kill
1377 iwm_enable_rfkill_int(sc);
1378 iwm_check_rfkill(sc);
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the firmware PHY config:
 * extract radio type/step/dash, merge in the MAC step/dash from the
 * hardware revision, and apply the 7000-family early-power-off
 * workaround.
 */
1382 iwm_mvm_nic_config(struct iwm_softc *sc)
1384 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1385 uint32_t reg_val = 0;
1386 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1388 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1389 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1390 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1391 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1392 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1393 IWM_FW_PHY_CFG_RADIO_DASH_POS;
/* MAC step/dash come from the hardware revision register. */
1396 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1397 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1398 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1399 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1401 /* radio configuration */
1402 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1403 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1404 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1406 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1408 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1409 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1410 radio_cfg_step, radio_cfg_dash);
1413 * W/A : NIC is stuck in a reset state after Early PCIe power off
1414 * (PCIe power is lost before PERST# is asserted), causing ME FW
1415 * to lose ownership and not being able to obtain it back.
1417 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1418 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1419 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1420 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Hardware RX path bring-up: clear the status area, stop the RX
 * DMA engine, program ring/status base addresses and the channel-0
 * RX config, set interrupt coalescing, then publish an initial
 * write pointer of 8 per the iwlwifi convention.
 */
1425 iwm_nic_rx_init(struct iwm_softc *sc)
1428 * Initialize RX ring. This is from the iwn driver.
1430 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1433 iwm_pcie_rx_stop(sc);
1435 if (!iwm_nic_lock(sc))
1438 /* reset and flush pointers */
1439 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1440 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1441 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1442 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1444 /* Set physical address of RX ring (256-byte aligned). */
1446 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1448 /* Set physical address of RX status (16-byte aligned). */
1450 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1452 #if defined(__DragonFly__)
1453 /* Force serialization (probably not needed but don't trust the HW) */
1454 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
/* Enable channel 0 RX DMA with 4K buffers and RB-threshold IRQs. */
1458 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1459 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1460 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1461 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1462 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1463 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1464 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1465 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1467 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1469 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1470 if (sc->cfg->host_interrupt_operation_mode)
1471 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1474 * Thus sayeth el jefe (iwlwifi) via a comment:
1476 * This value should initially be 0 (before preparing any
1477 * RBs), should be 8 after preparing the first 8 RBs (for example)
1479 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Hardware TX path bring-up: deactivate the scheduler, program the
 * "keep warm" page and every TX ring's descriptor base address,
 * then enable the scheduler's auto-active mode.
 */
1487 iwm_nic_tx_init(struct iwm_softc *sc)
1491 if (!iwm_nic_lock(sc))
1494 /* Deactivate TX scheduler. */
1495 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1497 /* Set physical address of "keep warm" page (16-byte aligned). */
1498 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1500 /* Initialize TX rings. */
1501 for (qid = 0; qid < nitems(sc->txq); qid++) {
1502 struct iwm_tx_ring *txq = &sc->txq[qid];
1504 /* Set physical address of TX ring (256-byte aligned). */
1505 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1506 txq->desc_dma.paddr >> 8);
1507 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1508 "%s: loading ring %d descriptors (%p) at %lx\n",
1511 (unsigned long) (txq->desc_dma.paddr >> 8));
1514 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
/*
 * Top-level NIC init: configure the hardware interface, then bring
 * up the RX and TX paths; finally enable shadow registers.
 */
1522 iwm_nic_init(struct iwm_softc *sc)
1527 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1530 iwm_mvm_nic_config(sc);
1532 if ((error = iwm_nic_rx_init(sc)) != 0)
1536 * Ditto for TX, from iwn
1538 if ((error = iwm_nic_tx_init(sc)) != 0)
1541 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1542 "%s: shadow registers enabled\n", __func__);
1543 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/* Map net80211 access categories to hardware TX FIFO numbers.
 * NOTE(review): initializer not visible in this chunk — verify
 * ordering against the WME AC enumeration. */
1548 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate one TX queue in the scheduler and bind it to a FIFO.
 * The command queue is configured directly via PRPH registers;
 * other queues are configured by sending IWM_SCD_QUEUE_CFG to the
 * firmware.  Each register-access phase re-takes the NIC lock.
 */
1556 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1558 if (!iwm_nic_lock(sc)) {
1559 device_printf(sc->sc_dev,
1560 "%s: cannot enable txq %d\n",
1566 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1568 if (qid == IWM_MVM_CMD_QUEUE) {
1569 /* deactivate before configuration */
1570 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1571 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1572 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1576 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1578 if (!iwm_nic_lock(sc)) {
1579 device_printf(sc->sc_dev,
1580 "%s: cannot enable txq %d\n", __func__, qid);
1583 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1586 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1587 /* Set scheduler window size and frame limit. */
1589 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1591 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1592 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1593 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1594 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1596 if (!iwm_nic_lock(sc)) {
1597 device_printf(sc->sc_dev,
1598 "%s: cannot enable txq %d\n", __func__, qid);
/* Mark the queue active and bound to its FIFO. */
1601 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1602 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1603 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1604 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1605 IWM_SCD_QUEUE_STTS_REG_MSK);
/* Non-command queues: let the firmware do the scheduler setup. */
1607 struct iwm_scd_txq_cfg_cmd cmd;
1612 memset(&cmd, 0, sizeof(cmd));
1613 cmd.scd_queue = qid;
1615 cmd.sta_id = sta_id;
1618 cmd.window = IWM_FRAME_LIMIT;
1620 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1623 device_printf(sc->sc_dev,
1624 "cannot enable txq %d\n", qid);
1628 if (!iwm_nic_lock(sc))
1632 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1633 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1637 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1638 __func__, qid, fifo);
/*
 * Post-"alive" firmware bring-up: verify the scheduler SRAM base
 * address reported by firmware, clear scheduler context memory,
 * program the scheduler DRAM base, enable the command queue and
 * all FH TX DMA channels, and (non-8000) enable PCIe L1-Active.
 */
1644 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1648 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1649 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1651 if (!iwm_nic_lock(sc))
1658 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1659 if (scd_base_addr != 0 &&
1660 scd_base_addr != sc->scd_base_addr) {
1661 device_printf(sc->sc_dev,
1662 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1663 __func__, sc->scd_base_addr, scd_base_addr);
1666 /* reset context data, TX status and translation data */
1667 error = iwm_write_mem(sc,
1668 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1669 NULL, clear_dwords);
1673 if (!iwm_nic_lock(sc))
1676 /* Set physical address of TX scheduler rings (1KB aligned). */
1677 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1679 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1683 /* enable command channel */
1684 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1688 if (!iwm_nic_lock(sc))
1691 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1693 /* Enable DMA channels. */
1694 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1695 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1696 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1697 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1700 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1701 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1705 /* Enable L1-Active */
1706 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1707 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1708 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1715 * NVM read access and content parsing. We do not support
1716 * external NVM or writing NVM.
1720 /* Default NVM size to read */
1721 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Opcodes for the IWM_NVM_ACCESS_CMD firmware command. */
1723 #define IWM_NVM_WRITE_OPCODE 1
1724 #define IWM_NVM_READ_OPCODE 0
1726 /* load nvm chunk response */
1728 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1729 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD
 * firmware command, copying the returned bytes into data+offset and
 * reporting the byte count via *len.  A NOT_VALID_ADDRESS status at
 * a non-zero offset is treated as normal end-of-section.
 */
1733 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1734 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1736 struct iwm_nvm_access_cmd nvm_access_cmd = {
1737 .offset = htole16(offset),
1738 .length = htole16(length),
1739 .type = htole16(section),
1740 .op_code = IWM_NVM_READ_OPCODE,
1742 struct iwm_nvm_access_resp *nvm_resp;
1743 struct iwm_rx_packet *pkt;
1744 struct iwm_host_cmd cmd = {
1745 .id = IWM_NVM_ACCESS_CMD,
1746 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1747 .data = { &nvm_access_cmd, },
1749 int ret, bytes_read, offset_read;
1752 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1754 ret = iwm_send_cmd(sc, &cmd);
1756 device_printf(sc->sc_dev,
1757 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1763 /* Extract NVM response */
1764 nvm_resp = (void *)pkt->data;
1765 ret = le16toh(nvm_resp->status);
1766 bytes_read = le16toh(nvm_resp->length);
1767 offset_read = le16toh(nvm_resp->offset);
1768 resp_data = nvm_resp->data;
1770 if ((offset != 0) &&
1771 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1773 * meaning of NOT_VALID_ADDRESS:
1774 * driver try to read chunk from address that is
1775 * multiple of 2K and got an error since addr is empty.
1776 * meaning of (offset != 0): driver already
1777 * read valid data from another chunk so this case
1780 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1781 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1786 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1787 "NVM access command failed with status %d\n", ret);
/* Sanity-check the response against what we asked for. */
1793 if (offset_read != offset) {
1794 device_printf(sc->sc_dev,
1795 "NVM ACCESS response with invalid offset %d\n",
1801 if (bytes_read > length) {
1802 device_printf(sc->sc_dev,
1803 "NVM ACCESS response with too much data "
1804 "(%d bytes requested, %d bytes received)\n",
1805 length, bytes_read);
1810 /* Write data to NVM */
1811 memcpy(data + offset, resp_data, bytes_read);
1815 iwm_free_resp(sc, &cmd);
1820 * Reads an NVM section completely.
1821 * NICs prior to 7000 family don't have a real NVM, but just read
1822 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1823 * by uCode, we need to manually check in this case that we don't
1824 * overflow and try to read more than the EEPROM size.
1825 * For 7000 family NICs, we supply the maximal size we can read, and
1826 * the uCode fills the response with as much data as we can,
1827 * without overflowing, so no check is needed.
/*
 * Read an entire NVM section in IWM_NVM_DEFAULT_CHUNK_SIZE chunks
 * until a short read signals the end, guarding against overflowing
 * the configured EEPROM size; *len receives the running length.
 */
1830 iwm_nvm_read_section(struct iwm_softc *sc,
1831 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1833 uint16_t seglen, length, offset = 0;
1836 /* Set nvm section read length */
1837 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1841 /* Read the NVM until exhausted (reading less than requested) */
1842 while (seglen == length) {
1843 /* Check no memory assumptions fail and cause an overflow */
1844 if ((size_read + offset + length) >
1845 sc->cfg->eeprom_size) {
1846 device_printf(sc->sc_dev,
1847 "EEPROM size is too small for NVM\n");
1851 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1853 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1854 "Cannot read NVM from section %d offset %d, length %d\n",
1855 section, offset, length);
1861 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1862 "NVM section %d read completed\n", section);
1867 /* NVM offsets (in words) definitions */
/* Word offsets within the NVM image for pre-8000 family devices. */
1868 enum iwm_nvm_offsets {
1869 /* NVM HW-Section offset (in words) definitions */
1872 /* NVM SW-Section offset (in words) definitions */
1873 IWM_NVM_SW_SECTION = 0x1C0,
1874 IWM_NVM_VERSION = 0,
1878 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1880 /* NVM calibration section offset (in words) definitions */
1881 IWM_NVM_CALIB_SECTION = 0x2B8,
1882 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
/* Word offsets within the NVM image for 8000-family devices. */
1885 enum iwm_8000_nvm_offsets {
1886 /* NVM HW-Section offset (in words) definitions */
1887 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1888 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1889 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1890 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1891 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1893 /* NVM SW-Section offset (in words) definitions */
1894 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1895 IWM_NVM_VERSION_8000 = 0,
1896 IWM_RADIO_CFG_8000 = 0,
1898 IWM_N_HW_ADDRS_8000 = 3,
1900 /* NVM REGULATORY -Section offset (in words) definitions */
1901 IWM_NVM_CHANNELS_8000 = 0,
1902 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1903 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1904 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1906 /* NVM calibration section offset (in words) definitions */
1907 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1908 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1911 /* SKU Capabilities (actual values from NVM definition) */
/* Bits in the SKU word reported by iwm_get_sku(). */
1913 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1914 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1915 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1916 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1919 /* radio config bits (actual values from NVM definition) */
/* Field extractors for the pre-8000 16-bit radio-config word. */
1920 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1921 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1922 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1923 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1924 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1925 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/* Field extractors for the 8000-family 32-bit radio-config word. */
1927 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1928 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1929 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1930 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1931 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1932 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
1934 #define DEFAULT_MAX_TX_POWER 16
1937 * enum iwm_nvm_channel_flags - channel flags in NVM
1938 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1939 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1940 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1941 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1942 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1943 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1944 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1945 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1946 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1947 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1949 enum iwm_nvm_channel_flags {
1950 IWM_NVM_CHANNEL_VALID = (1 << 0),
1951 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1952 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1953 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1954 IWM_NVM_CHANNEL_DFS = (1 << 7),
1955 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1956 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1957 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1958 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1962 * Translate EEPROM flags to net80211.
/* Map NVM channel flag bits onto net80211 IEEE80211_CHAN_* flags. */
1965 iwm_eeprom_channel_flags(uint16_t ch_flags)
1970 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1971 nflags |= IEEE80211_CHAN_PASSIVE;
1972 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1973 nflags |= IEEE80211_CHAN_NOADHOC;
/* Radar channels require DFS and forbid adhoc operation. */
1974 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1975 nflags |= IEEE80211_CHAN_DFS;
1977 nflags |= IEEE80211_CHAN_NOADHOC;
/*
 * Add the channels [ch_idx, ch_num) of the NVM channel table to the
 * net80211 channel list, skipping entries not marked VALID and
 * translating NVM flags via iwm_eeprom_channel_flags().
 */
1984 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1985 int maxchans, int *nchans, int ch_idx, size_t ch_num,
1986 const uint8_t bands[])
1988 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1994 for (; ch_idx < ch_num; ch_idx++) {
1995 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
/* The channel-number table differs between 7000 and 8000 families. */
1996 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1997 ieee = iwm_nvm_channels[ch_idx];
1999 ieee = iwm_nvm_channels_8000[ch_idx];
2001 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2002 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2003 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2005 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2010 nflags = iwm_eeprom_channel_flags(ch_flags);
2011 error = ieee80211_add_channel(chans, maxchans, nchans,
2012 ieee, 0, 0, nflags, bands);
2016 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2017 "Ch. %d Flags %x [%sGHz] - Added\n",
2019 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
/*
 * net80211 channel-map callback: populate the channel list from the
 * NVM channel table — 2 GHz b/g channels 1-13, channel 14 as 11b
 * only, and the 5 GHz band when the SKU enables it.
 */
2025 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2026 struct ieee80211_channel chans[])
2028 struct iwm_softc *sc = ic->ic_softc;
2029 struct iwm_nvm_data *data = sc->nvm_data;
2030 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2033 memset(bands, 0, sizeof(bands));
2034 /* 1-13: 11b/g channels. */
2035 setbit(bands, IEEE80211_MODE_11B);
2036 setbit(bands, IEEE80211_MODE_11G);
2037 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2038 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2040 /* 14: 11b channel only. */
2041 clrbit(bands, IEEE80211_MODE_11G);
2042 iwm_add_channel_band(sc, chans, maxchans, nchans,
2043 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2045 if (data->sku_cap_band_52GHz_enable) {
2046 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2047 ch_num = nitems(iwm_nvm_channels);
2049 ch_num = nitems(iwm_nvm_channels_8000);
2050 memset(bands, 0, sizeof(bands));
2051 setbit(bands, IEEE80211_MODE_11A);
2052 iwm_add_channel_band(sc, chans, maxchans, nchans,
2053 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
/*
 * Derive the MAC address on 8000-family parts: prefer the NVM
 * MAC-override section; if that address is the reserved/broadcast/
 * multicast/invalid value, fall back to the OTP address read from
 * the WFMP PRPH registers (stored byte-reversed per 32-bit word).
 */
2058 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2059 const uint16_t *mac_override, const uint16_t *nvm_hw)
2061 const uint8_t *hw_addr;
2064 static const uint8_t reserved_mac[] = {
2065 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2068 hw_addr = (const uint8_t *)(mac_override +
2069 IWM_MAC_ADDRESS_OVERRIDE_8000);
2072 * Store the MAC address from MAO section.
2073 * No byte swapping is required in MAO section
2075 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2078 * Force the use of the OTP MAC address in case of reserved MAC
2079 * address in the NVM, or if address is given but invalid.
2081 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2082 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2083 iwm_is_valid_ether_addr(data->hw_addr) &&
2084 !IEEE80211_IS_MULTICAST(data->hw_addr))
2087 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2088 "%s: mac address from nvm override section invalid\n",
2093 /* read the mac address from WFMP registers */
2094 uint32_t mac_addr0 =
2095 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2096 uint32_t mac_addr1 =
2097 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
/* Registers hold the address byte-reversed within each word. */
2099 hw_addr = (const uint8_t *)&mac_addr0;
2100 data->hw_addr[0] = hw_addr[3];
2101 data->hw_addr[1] = hw_addr[2];
2102 data->hw_addr[2] = hw_addr[1];
2103 data->hw_addr[3] = hw_addr[0];
2105 hw_addr = (const uint8_t *)&mac_addr1;
2106 data->hw_addr[4] = hw_addr[1];
2107 data->hw_addr[5] = hw_addr[0];
2112 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2113 memset(data->hw_addr, 0, sizeof(data->hw_addr));
/* Return the SKU capability word; it lives in the SW section on
 * pre-8000 parts and in the PHY-SKU section (32-bit) on 8000. */
2117 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2118 const uint16_t *phy_sku)
2120 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2121 return le16_to_cpup(nvm_sw + IWM_SKU);
2123 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
/* Return the NVM image version (16-bit pre-8000, 32-bit on 8000). */
2127 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2129 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2130 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION)
2132 return le32_to_cpup((const uint32_t *)(nvm_sw +
2133 IWM_NVM_VERSION_8000));
/* Return the raw radio-config word; SW section pre-8000, PHY-SKU
 * section (32-bit) on the 8000 family. */
2137 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2138 const uint16_t *phy_sku)
2140 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2141 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2143 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
/* Return how many MAC addresses the NVM provides; the 8000-family
 * value is masked with IWM_N_HW_ADDR_MASK. */
2147 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2151 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2152 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2154 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2156 return n_hw_addr & IWM_N_HW_ADDR_MASK;
/*
 * Decode the radio-config word into the nvm_data fields, using the
 * family-specific bit layout; the 8000 format additionally carries
 * the valid TX/RX antenna masks.
 */
2160 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2163 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2164 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2165 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2166 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2167 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2171 /* set the radio configuration for family 8000 */
2172 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2173 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2174 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2175 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2176 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2177 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
/*
 * Fill data->hw_addr: pre-8000 parts read it from the HW section
 * (byte-swapped 16-bit words, hence the 214365 ordering); 8000
 * parts use iwm_set_hw_address_family_8000().  Fails if no valid
 * address results.
 */
2181 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2182 const uint16_t *nvm_hw, const uint16_t *mac_override)
2184 #ifdef notyet /* for FAMILY 9000 */
2185 if (cfg->mac_addr_from_csr) {
2186 iwm_set_hw_address_from_csr(sc, data);
2189 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2190 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2192 /* The byte order is little endian 16 bit, meaning 214365 */
2193 data->hw_addr[0] = hw_addr[1];
2194 data->hw_addr[1] = hw_addr[0];
2195 data->hw_addr[2] = hw_addr[3];
2196 data->hw_addr[3] = hw_addr[2];
2197 data->hw_addr[4] = hw_addr[5];
2198 data->hw_addr[5] = hw_addr[4];
2200 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2203 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2204 device_printf(sc->sc_dev, "no valid mac address was found\n");
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data:
 * NVM version, radio configuration, SKU capability bits, MAC
 * address and the per-channel flags table.  Returns NULL (freeing
 * the allocation) when no valid MAC address can be derived; the
 * caller owns the result and releases it with iwm_free_nvm_data().
 */
2211 static struct iwm_nvm_data *
2212 iwm_parse_nvm_data(struct iwm_softc *sc,
2213 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2214 const uint16_t *nvm_calib, const uint16_t *mac_override,
2215 const uint16_t *phy_sku, const uint16_t *regulatory)
2217 struct iwm_nvm_data *data;
2218 uint32_t sku, radio_cfg;
/* Channel-flag table size differs between the two families. */
2220 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2221 data = kmalloc(sizeof(*data) +
2222 IWM_NUM_CHANNELS * sizeof(uint16_t),
2223 M_DEVBUF, M_WAITOK | M_ZERO);
2225 data = kmalloc(sizeof(*data) +
2226 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2227 M_DEVBUF, M_WAITOK | M_ZERO);
2232 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2234 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2235 iwm_set_radio_cfg(sc, data, radio_cfg);
2237 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2238 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2239 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
/* 11n deliberately forced off regardless of the SKU bit. */
2240 data->sku_cap_11n_enable = 0;
2242 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2244 /* If no valid mac address was found - bail out */
2245 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2246 kfree(data, M_DEVBUF);
2250 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2251 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2252 IWM_NUM_CHANNELS * sizeof(uint16_t));
/* 8000 family keeps the channel flags in the REGULATORY section.
 * (Fixed mojibake: "&regulatory" had been corrupted to a reg-mark
 * character, which would not compile.) */
2254 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2255 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
/* Release an iwm_nvm_data allocated by iwm_parse_nvm_data(). */
2262 iwm_free_nvm_data(struct iwm_nvm_data *data)
2265 kfree(data, M_DEVBUF);
/*
 * Validate that the per-family mandatory NVM sections are present
 * (SW + HW for 7000; SW + REGULATORY, a MAC source, and PHY_SKU for
 * 8000), then hand the section pointers to iwm_parse_nvm_data().
 */
2268 static struct iwm_nvm_data *
2269 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2271 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2273 /* Checking for required sections */
2274 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2275 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2276 !sections[sc->cfg->nvm_hw_section_num].data) {
2277 device_printf(sc->sc_dev,
2278 "Can't parse empty OTP/NVM sections\n");
2281 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2282 /* SW and REGULATORY sections are mandatory */
2283 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2284 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2285 device_printf(sc->sc_dev,
2286 "Can't parse empty OTP/NVM sections\n");
2289 /* MAC_OVERRIDE or at least HW section must exist */
2290 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2291 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2292 device_printf(sc->sc_dev,
2293 "Can't parse mac_address, empty sections\n");
2297 /* PHY_SKU section is mandatory in B0 */
2298 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2299 device_printf(sc->sc_dev,
2300 "Can't parse phy_sku in B0, empty sections\n");
2304 panic("unknown device family %d\n", sc->cfg->device_family);
2307 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2308 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2309 calib = (const uint16_t *)
2310 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2311 regulatory = (const uint16_t *)
2312 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2313 mac_override = (const uint16_t *)
2314 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2315 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2317 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2318 phy_sku, regulatory);
/*
 * Read every NVM section from the device into per-section copies,
 * parse them into sc->nvm_data, then free the temporary buffers.
 * A shared read buffer of eeprom_size is reused for each section.
 */
2322 iwm_nvm_init(struct iwm_softc *sc)
2324 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2325 int i, ret, section;
2326 uint32_t size_read = 0;
2327 uint8_t *nvm_buffer, *temp;
2330 memset(nvm_sections, 0, sizeof(nvm_sections));
2332 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2335 /* load NVM values from nic */
2336 /* Read From FW NVM */
2337 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2339 nvm_buffer = kmalloc(sc->cfg->eeprom_size, M_DEVBUF,
2340 M_INTWAIT | M_ZERO);
2343 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2344 /* we override the constness for initial read */
2345 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
/* Keep a private copy of each successfully read section. */
2350 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2355 memcpy(temp, nvm_buffer, len);
2357 nvm_sections[section].data = temp;
2358 nvm_sections[section].length = len;
2361 device_printf(sc->sc_dev, "OTP is blank\n");
2362 kfree(nvm_buffer, M_DEVBUF);
2364 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2367 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2368 "nvm version = %x\n", sc->nvm_data->nvm_version);
/* Section copies are no longer needed once parsed. */
2370 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2371 if (nvm_sections[i].data != NULL)
2372 kfree(nvm_sections[i].data, M_DEVBUF);
/*
 * Copy one firmware section into device SRAM, chunk by chunk, through
 * the pre-allocated sc->fw_dma bounce buffer.  Chunks whose destination
 * falls in the extended SRAM window are bracketed by setting/clearing
 * the LMPM extended-address-space bit.  (Chunk elided: some error
 * handling and the return path are not visible here.)
 */
2379 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2380 const struct iwm_fw_desc *section)
2382 struct iwm_dma_info *dma = &sc->fw_dma;
/* Never exceed the FH transfer-buffer hardware limit per chunk. */
2385 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2388 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2389 "%s: [%d] uCode section being loaded...\n",
2390 __func__, section_num);
2392 v_addr = dma->vaddr;
2393 p_addr = dma->paddr;
2395 for (offset = 0; offset < section->len; offset += chunk_sz) {
2396 uint32_t copy_size, dst_addr;
2397 int extended_addr = FALSE;
/* Last chunk may be shorter than chunk_sz. */
2399 copy_size = MIN(chunk_sz, section->len - offset);
2400 dst_addr = section->offset + offset;
2402 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2403 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2404 extended_addr = TRUE;
2407 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2408 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/* Stage the chunk in the DMA buffer, then hand it to the DMA engine. */
2410 memcpy(v_addr, (const uint8_t *)section->data + offset,
2412 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2413 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2417 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2418 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2421 device_printf(sc->sc_dev,
2422 "%s: Could not load the [%d] uCode section\n",
2423 __func__, section_num);
/*
 * Program the FH service DMA channel to push one firmware chunk from
 * host memory (phy_addr) to device SRAM (dst_addr), then sleep until
 * the "fw chunk done" interrupt fires or a 5 second timeout elapses.
 * Requires the NIC lock for the register writes.
 */
2435 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2436 bus_addr_t phy_addr, uint32_t byte_cnt)
/* Cleared here; the interrupt handler sets it when the DMA completes. */
2440 sc->sc_fw_chunk_done = 0;
2442 if (!iwm_nic_lock(sc))
/* Pause the channel while the transfer descriptor is (re)programmed. */
2445 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2446 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2448 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
/* Low 32 bits of the source DMA address ... */
2451 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2452 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
/* ... and the high bits plus the byte count in CTRL1. */
2454 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2455 (iwm_get_dma_hi_addr(phy_addr)
2456 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2458 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2459 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2460 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2461 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
/* Kick the channel: enable DMA and request an interrupt at end of TFD. */
2463 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2464 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2465 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2466 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2470 /* wait up to 5s for this segment to load */
2472 while (!sc->sc_fw_chunk_done) {
2473 #if defined(__DragonFly__)
2474 ret = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", 5 * hz);
2476 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", 5 * hz);
2483 device_printf(sc->sc_dev,
2484 "fw chunk addr 0x%x len %d failed to load\n",
2485 dst_addr, byte_cnt);
/*
 * 8000-family variant: load all firmware sections belonging to one CPU
 * (cpu 1 or 2), notifying the ucode after each section via the
 * FH_UCODE_LOAD_STATUS register (sec_num grows as a bit mask).
 * *first_ucode_section is advanced past this CPU's sections so the
 * caller can continue with the next CPU.
 */
2493 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2494 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2497 int i, ret = 0, sec_num = 0x1;
2498 uint32_t val, last_read_idx = 0;
2502 *first_ucode_section = 0;
2505 (*first_ucode_section)++;
2508 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2512 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2514 * PAGING_SEPARATOR_SECTION delimiter - separate between
2515 * CPU2 non paged to CPU2 paging sec.
/* Stop at an empty slot or at either separator pseudo-section. */
2517 if (!image->fw_sect[i].data ||
2518 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2519 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2520 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2521 "Break since Data not valid or Empty section, sec = %d\n",
2525 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2529 /* Notify the ucode of the loaded section number and status */
2530 if (iwm_nic_lock(sc)) {
2531 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2532 val = val | (sec_num << shift_param);
2533 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
/* Accumulate a ones-mask: 0x1, 0x3, 0x7, ... one bit per section. */
2534 sec_num = (sec_num << 1) | 0x1;
2539 *first_ucode_section = last_read_idx;
2541 iwm_enable_interrupts(sc);
/* presumably 0xFFFF for CPU1 and 0xFFFFFFFF for CPU2 -- TODO confirm */
2543 if (iwm_nic_lock(sc)) {
2545 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2547 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/*
 * Pre-8000 variant: load all firmware sections for one CPU, then mark
 * the load complete in CSR_UCODE_LOAD_STATUS (8000 family only takes
 * the extra prph write).  *first_ucode_section is advanced past this
 * CPU's sections for the caller.
 */
2555 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2556 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2560 uint32_t last_read_idx = 0;
2564 *first_ucode_section = 0;
2567 (*first_ucode_section)++;
2570 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2574 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2576 * PAGING_SEPARATOR_SECTION delimiter - separate between
2577 * CPU2 non paged to CPU2 paging sec.
/* Stop at an empty slot or at either separator pseudo-section. */
2579 if (!image->fw_sect[i].data ||
2580 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2581 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2582 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2583 "Break since Data not valid or Empty section, sec = %d\n",
2588 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
/* Tell an 8000-family ucode that this CPU's image is fully loaded. */
2593 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2594 iwm_set_bits_prph(sc,
2595 IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2596 (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2597 IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2598 IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2601 *first_ucode_section = last_read_idx;
/*
 * Load a complete (pre-8000) firmware image: CPU1 sections first, then
 * CPU2 when the image is dual-CPU, enable interrupts, and release the
 * CPU reset to start the ucode running.
 */
2608 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2609 const struct iwm_fw_sects *image)
2612 int first_ucode_section;
2614 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2615 image->is_dual_cpus ? "Dual" : "Single");
2617 /* load to FW the binary non secured sections of CPU1 */
2618 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2622 if (image->is_dual_cpus) {
2623 /* set CPU2 header address */
2625 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2626 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2628 /* load to FW the binary sections of CPU2 */
2629 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2630 &first_ucode_section);
2635 iwm_enable_interrupts(sc);
2637 /* release CPU reset */
2638 IWM_WRITE(sc, IWM_CSR_RESET, 0);
/*
 * 8000-family image load: release the CPU reset first (the secure boot
 * flow expects the ucode to be ready before sections arrive), then load
 * CPU1 and CPU2 sections via the 8000-specific loader.
 */
2644 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2645 const struct iwm_fw_sects *image)
2648 int first_ucode_section;
2650 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2651 image->is_dual_cpus ? "Dual" : "Single");
2653 /* configure the ucode to be ready to get the secured image */
2654 /* release CPU reset */
2655 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2657 /* load to FW the binary Secured sections of CPU1 */
2658 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2659 &first_ucode_section);
2663 /* load to FW the binary sections of CPU2 */
2664 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2665 &first_ucode_section);
2668 /* XXX Get rid of this definition */
/*
 * Restrict the interrupt mask to FH_TX only, so that firmware loading
 * can proceed without being disturbed by other interrupt sources
 * (notably RF-kill; see the comment in iwm_start_fw()).
 */
2670 iwm_enable_fw_load_int(struct iwm_softc *sc)
2672 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2673 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2674 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2677 /* XXX Add proper rfkill support code */
/*
 * Bring the hardware up and push the given firmware image into it:
 * acquire the device from AMT, clear pending interrupts and rfkill
 * handshake bits, init the NIC, mask everything but the FH_TX (load)
 * interrupt, and dispatch to the family-specific image loader.
 */
2679 iwm_start_fw(struct iwm_softc *sc,
2680 const struct iwm_fw_sects *fw)
2684 /* This may fail if AMT took ownership of the device */
2685 if (iwm_prepare_card_hw(sc)) {
2686 device_printf(sc->sc_dev,
2687 "%s: Exit HW not ready\n", __func__);
/* Ack/clear any stale interrupt causes before masking. */
2692 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2694 iwm_disable_interrupts(sc);
2696 /* make sure rfkill handshake bits are cleared */
2697 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2698 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2699 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2701 /* clear (again), then enable host interrupts */
2702 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2704 ret = iwm_nic_init(sc);
2706 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2711 * Now, we load the firmware and don't want to be interrupted, even
2712 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2713 * FH_TX interrupt which is needed to load the firmware). If the
2714 * RF-Kill switch is toggled, we will find out after having loaded
2715 * the firmware and return the proper value to the caller.
2717 iwm_enable_fw_load_int(sc);
2719 /* really make sure rfkill handshake bits are cleared */
2720 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2721 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2723 /* Load the given image to the HW */
2724 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2725 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2727 ret = iwm_pcie_load_given_ucode(sc, fw);
2729 /* XXX re-check RF-Kill state */
/*
 * Tell the firmware which TX antennas are valid, as a synchronous
 * IWM_TX_ANT_CONFIGURATION_CMD host command.
 */
2736 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2738 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2739 .valid = htole32(valid_tx_ant),
2742 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2743 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
/*
 * Send the PHY configuration command: the chip's phy_cfg word plus the
 * calibration triggers recorded for the currently-running ucode type.
 * Sent synchronously; used to kick off the INIT image calibrations.
 */
2747 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2749 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2750 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2752 /* Set parameters */
2753 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2754 phy_cfg_cmd.calib_control.event_trigger =
2755 sc->sc_default_calib[ucode_type].event_trigger;
2756 phy_cfg_cmd.calib_control.flow_trigger =
2757 sc->sc_default_calib[ucode_type].flow_trigger;
2759 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2760 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2761 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2762 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * Notification-wait callback for the firmware ALIVE message.  The
 * payload size distinguishes the three ALIVE response versions; in all
 * cases the error/log event table pointers, the scheduler base address
 * and the alive status are extracted into the softc / alive_data.
 * V2 and V3 additionally carry the UMAC error-info address, which
 * enables UMAC log support when non-zero.
 */
2766 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2768 struct iwm_mvm_alive_data *alive_data = data;
2769 struct iwm_mvm_alive_resp_ver1 *palive1;
2770 struct iwm_mvm_alive_resp_ver2 *palive2;
2771 struct iwm_mvm_alive_resp *palive;
/* Version 1: no UMAC information at all. */
2773 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2774 palive1 = (void *)pkt->data;
2776 sc->support_umac_log = FALSE;
2777 sc->error_event_table =
2778 le32toh(palive1->error_event_table_ptr);
2779 sc->log_event_table =
2780 le32toh(palive1->log_event_table_ptr);
2781 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2783 alive_data->valid = le16toh(palive1->status) ==
2784 IWM_ALIVE_STATUS_OK;
2785 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2786 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2787 le16toh(palive1->status), palive1->ver_type,
2788 palive1->ver_subtype, palive1->flags);
/* Version 2: adds the UMAC error table and version numbers. */
2789 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2790 palive2 = (void *)pkt->data;
2791 sc->error_event_table =
2792 le32toh(palive2->error_event_table_ptr);
2793 sc->log_event_table =
2794 le32toh(palive2->log_event_table_ptr);
2795 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2796 sc->umac_error_event_table =
2797 le32toh(palive2->error_info_addr);
2799 alive_data->valid = le16toh(palive2->status) ==
2800 IWM_ALIVE_STATUS_OK;
2801 if (sc->umac_error_event_table)
2802 sc->support_umac_log = TRUE;
2804 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2805 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2806 le16toh(palive2->status), palive2->ver_type,
2807 palive2->ver_subtype, palive2->flags);
2809 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2810 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2811 palive2->umac_major, palive2->umac_minor);
/* Version 3 (current struct): same fields, 32-bit UMAC versions. */
2812 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2813 palive = (void *)pkt->data;
2815 sc->error_event_table =
2816 le32toh(palive->error_event_table_ptr);
2817 sc->log_event_table =
2818 le32toh(palive->log_event_table_ptr);
2819 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2820 sc->umac_error_event_table =
2821 le32toh(palive->error_info_addr);
2823 alive_data->valid = le16toh(palive->status) ==
2824 IWM_ALIVE_STATUS_OK;
2825 if (sc->umac_error_event_table)
2826 sc->support_umac_log = TRUE;
2828 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2829 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2830 le16toh(palive->status), palive->ver_type,
2831 palive->ver_subtype, palive->flags);
2833 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2834 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2835 le32toh(palive->umac_major),
2836 le32toh(palive->umac_minor));
/*
 * Notification-wait callback used while waiting for INIT-image
 * calibration results: stores each IWM_CALIB_RES_NOTIF_PHY_DB packet
 * into the phy db, and complains about anything else except the final
 * IWM_INIT_COMPLETE_NOTIF.
 */
2843 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2844 struct iwm_rx_packet *pkt, void *data)
2846 struct iwm_phy_db *phy_db = data;
2848 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2849 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2850 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2851 __func__, pkt->hdr.code);
2856 if (iwm_phy_db_set_section(phy_db, pkt)) {
2857 device_printf(sc->sc_dev,
2858 "%s: iwm_phy_db_set_section failed\n", __func__);
/*
 * Load the requested ucode type and block until the firmware's ALIVE
 * notification arrives (handled by iwm_alive_fn).  On any failure the
 * previous ucode type is restored in sc->cur_ucode.  After a valid
 * ALIVE, the TX scheduler is configured and, when the image carries
 * paging data, the FW paging mechanism is saved and enabled.
 */
2865 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2866 enum iwm_ucode_type ucode_type)
2868 struct iwm_notification_wait alive_wait;
2869 struct iwm_mvm_alive_data alive_data;
2870 const struct iwm_fw_sects *fw;
2871 enum iwm_ucode_type old_type = sc->cur_ucode;
2873 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2875 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2876 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2880 fw = &sc->sc_fw.fw_sects[ucode_type];
2881 sc->cur_ucode = ucode_type;
2882 sc->ucode_loaded = FALSE;
/* Register the ALIVE waiter before starting the firmware. */
2884 memset(&alive_data, 0, sizeof(alive_data));
2885 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2886 alive_cmd, NELEM(alive_cmd),
2887 iwm_alive_fn, &alive_data);
2889 error = iwm_start_fw(sc, fw);
2891 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2892 sc->cur_ucode = old_type;
2893 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2898 * Some things may run in the background now, but we
2899 * just wait for the ALIVE notification here.
2902 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2903 IWM_MVM_UCODE_ALIVE_TIMEOUT);
/* On 8000-family parts, dump the secure-boot CPU status to aid debug. */
2906 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2907 device_printf(sc->sc_dev,
2908 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2909 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
2910 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2912 sc->cur_ucode = old_type;
2916 if (!alive_data.valid) {
2917 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2919 sc->cur_ucode = old_type;
/* Configure the TX scheduler with the base address the fw reported. */
2923 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2926 * configure and operate fw paging mechanism.
2927 * driver configures the paging flow only once, CPU2 paging image
2928 * included in the IWM_UCODE_INIT image.
2930 if (fw->paging_mem_size) {
2931 error = iwm_save_fw_paging(sc, fw);
2933 device_printf(sc->sc_dev,
2934 "%s: failed to save the FW paging image\n",
2939 error = iwm_send_paging_cmd(sc, fw);
2941 device_printf(sc->sc_dev,
2942 "%s: failed to send the paging cmd\n", __func__);
2943 iwm_free_fw_paging(sc);
2949 sc->ucode_loaded = TRUE;
/*
 * Run the INIT ucode image.  With justnvm set, only the NVM is read
 * (to obtain the MAC address); otherwise the full init flow runs:
 * BT coex config, Smart FIFO init, TX antenna config, PHY config to
 * trigger calibrations, then a wait for the calibration-complete
 * notification collected by iwm_wait_phy_db_entry().
 */
2958 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2960 struct iwm_notification_wait calib_wait;
2961 static const uint16_t init_complete[] = {
2962 IWM_INIT_COMPLETE_NOTIF,
2963 IWM_CALIB_RES_NOTIF_PHY_DB
2967 /* do not operate with rfkill switch turned on */
2968 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2969 device_printf(sc->sc_dev,
2970 "radio is disabled by hardware switch\n");
/* Register the calibration waiter before the firmware starts. */
2974 iwm_init_notification_wait(sc->sc_notif_wait,
2977 NELEM(init_complete),
2978 iwm_wait_phy_db_entry,
2981 /* Will also start the device */
2982 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2984 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2991 ret = iwm_nvm_init(sc);
2993 device_printf(sc->sc_dev, "failed to read nvm\n");
2996 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3000 ret = iwm_send_bt_init_conf(sc);
3002 device_printf(sc->sc_dev,
3003 "failed to send bt coex configuration: %d\n", ret);
3007 /* Init Smart FIFO. */
3008 ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3012 /* Send TX valid antennas before triggering calibrations */
3013 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3015 device_printf(sc->sc_dev,
3016 "failed to send antennas before calibration: %d\n", ret);
3021 * Send phy configurations command to init uCode
3022 * to start the 16.0 uCode init image internal calibrations.
3024 ret = iwm_send_phy_cfg_cmd(sc);
3026 device_printf(sc->sc_dev,
3027 "%s: Failed to run INIT calibrations: %d\n",
3033 * Nothing to do but wait for the init complete notification
3034 * from the firmware.
3037 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3038 IWM_MVM_UCODE_CALIB_TIMEOUT);
3045 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3054 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a jumbo mbuf for RX slot `idx`, DMA-map it via the spare
 * map (so a mapping failure never leaves the slot unmapped), swap the
 * spare and slot maps on success, and write the 256-byte-aligned DMA
 * address (>>8) into the RX descriptor.
 */
3056 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3058 struct iwm_rx_ring *ring = &sc->rxq;
3059 struct iwm_rx_data *data = &ring->data[idx];
3061 bus_dmamap_t dmamap = NULL;
3062 bus_dma_segment_t seg;
3065 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3069 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
/* Load into the spare map first; the slot stays valid on failure. */
3070 #if defined(__DragonFly__)
3071 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
3072 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
3074 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3075 &seg, &nsegs, BUS_DMA_NOWAIT);
3078 device_printf(sc->sc_dev,
3079 "%s: can't map mbuf, error %d\n", __func__, error);
3083 if (data->m != NULL)
3084 bus_dmamap_unload(ring->data_dmat, data->map);
3086 /* Swap ring->spare_map with data->map */
3088 data->map = ring->spare_map;
3089 ring->spare_map = dmamap;
3091 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3094 /* Update RX descriptor. */
/* Hardware requires 256-byte alignment; descriptor stores addr >> 8. */
3095 KKASSERT((seg.ds_addr & 255) == 0);
3096 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3097 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3098 BUS_DMASYNC_PREWRITE);
3106 #define IWM_RSSI_OFFSET 50
/*
 * Legacy (pre energy-API) RSSI computation: extract per-chain AGC and
 * in-band RSSI values from the PHY info, convert each chain to dBm
 * (rssi - offset - agc, since higher gain implies weaker signal) and
 * return the stronger of the two chains.
 */
3108 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3110 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3111 uint32_t agc_a, agc_b;
3114 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3115 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3116 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3118 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3119 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3120 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3123 * dBm = rssi dB - agc dB - constant.
3124 * Higher AGC (higher radio gain) means lower signal.
3126 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3127 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3128 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3130 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3131 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3132 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3134 return max_rssi_dbm;
3138 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3139 * values are reported by the fw as positive values - need to negate
3140 * to obtain their dBM. Account for missing antennas by replacing 0
3141 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
/*
 * Extract the per-antenna (A/B/C) energy fields from the PHY info
 * word, negate them into dBm (0 => -256 for a missing antenna), and
 * return the maximum across the three chains.
 */
3144 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3146 int energy_a, energy_b, energy_c, max_energy;
3149 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3150 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3151 IWM_RX_INFO_ENERGY_ANT_A_POS;
3152 energy_a = energy_a ? -energy_a : -256;
3153 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3154 IWM_RX_INFO_ENERGY_ANT_B_POS;
3155 energy_b = energy_b ? -energy_b : -256;
3156 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3157 IWM_RX_INFO_ENERGY_ANT_C_POS;
3158 energy_c = energy_c ? -energy_c : -256;
3159 max_energy = MAX(energy_a, energy_b);
3160 max_energy = MAX(max_energy, energy_c);
3162 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3163 "energy In A %d B %d C %d , and max %d\n",
3164 energy_a, energy_b, energy_c, max_energy);
/*
 * Handle a PHY-info notification: cache the PHY stats so the following
 * MPDU notification (iwm_mvm_rx_rx_mpdu) can use them.
 */
3170 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3172 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3174 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3176 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3180 * Retrieve the average noise (in dBm) among receivers.
/*
 * Average the firmware-reported beacon-silence RSSI over the (up to 3)
 * antennas that reported a value, converting to dBm with a -107 offset.
 * Returns -127 if no antenna reported noise.
 */
3183 iwm_get_noise(struct iwm_softc *sc,
3184 const struct iwm_mvm_statistics_rx_non_phy *stats)
3186 int i, total, nbant, noise;
3188 total = nbant = noise = 0;
3189 for (i = 0; i < 3; i++) {
/* Low byte of each entry carries the per-antenna noise figure. */
3190 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3191 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3192 __func__, i, noise);
3200 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3201 __func__, nbant, total);
3203 /* There should be at least one antenna but check anyway. */
3204 return (nbant == 0) ? -127 : (total / nbant) - 107;
3206 /* For now, just hard-code it to -96 to be safe */
3212 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3214 * Handles the actual data of the Rx packet from the fw
/*
 * Receive-path MPDU handler: validates the frame against the cached
 * PHY info and the packet status word (CRC/overrun), computes an RSSI
 * relative to the current noise floor, replenishes the RX ring slot,
 * fills an ieee80211_rx_stats, optionally emits a radiotap record, and
 * hands the mbuf to net80211 (per-node or "all" input path).
 */
3217 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3218 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3220 struct ieee80211com *ic = &sc->sc_ic;
3221 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3222 struct ieee80211_frame *wh;
3223 struct ieee80211_node *ni;
3224 struct ieee80211_rx_stats rxs;
3226 struct iwm_rx_phy_info *phy_info;
3227 struct iwm_rx_mpdu_res_start *rx_res;
3229 uint32_t rx_pkt_status;
/* PHY info was cached by the preceding iwm_mvm_rx_rx_phy_cmd(). */
3232 phy_info = &sc->sc_last_phy_info;
3233 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3234 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3235 len = le16toh(rx_res->byte_count);
/* The status word trails the frame payload in the packet buffer. */
3236 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3239 m->m_data = pkt->data + sizeof(*rx_res);
3240 m->m_pkthdr.len = m->m_len = len;
3242 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3243 device_printf(sc->sc_dev,
3244 "dsp size out of range [0,20]: %d\n",
3245 phy_info->cfg_phy_cnt);
/* Drop frames the hardware flagged as corrupt. */
3249 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3250 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3251 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3252 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
/* Newer firmware reports per-antenna energy; older needs AGC math. */
3256 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3257 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3259 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3261 /* Note: RSSI is absolute (ie a -ve value) */
3262 if (rssi < IWM_MIN_DBM)
3264 else if (rssi > IWM_MAX_DBM)
3267 /* Map it to relative value */
3268 rssi = rssi - sc->sc_noise;
3270 /* replenish ring for the buffer we're going to feed to the sharks */
3271 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3272 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3277 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3278 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3280 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3282 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3283 "%s: phy_info: channel=%d, flags=0x%08x\n",
3285 le16toh(phy_info->channel),
3286 le16toh(phy_info->phy_flags));
3289 * Populate an RX state struct with the provided information.
3291 bzero(&rxs, sizeof(rxs));
3292 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3293 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3294 rxs.c_ieee = le16toh(phy_info->channel);
3295 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3296 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3298 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3300 /* rssi is in 1/2db units */
3301 rxs.rssi = rssi * 2;
3302 rxs.nf = sc->sc_noise;
3304 if (ieee80211_radiotap_active_vap(vap)) {
3305 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3308 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3309 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3310 tap->wr_chan_freq = htole16(rxs.c_freq);
3311 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3312 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3313 tap->wr_dbm_antsignal = (int8_t)rssi;
3314 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3315 tap->wr_tsft = phy_info->system_timestamp;
/* Map the fw PLCP rate code to a radiotap rate in 500 kb/s units. */
3316 switch (phy_info->rate) {
3318 case 10: tap->wr_rate = 2; break;
3319 case 20: tap->wr_rate = 4; break;
3320 case 55: tap->wr_rate = 11; break;
3321 case 110: tap->wr_rate = 22; break;
3323 case 0xd: tap->wr_rate = 12; break;
3324 case 0xf: tap->wr_rate = 18; break;
3325 case 0x5: tap->wr_rate = 24; break;
3326 case 0x7: tap->wr_rate = 36; break;
3327 case 0x9: tap->wr_rate = 48; break;
3328 case 0xb: tap->wr_rate = 72; break;
3329 case 0x1: tap->wr_rate = 96; break;
3330 case 0x3: tap->wr_rate = 108; break;
3331 /* Unknown rate: should not happen. */
3332 default: tap->wr_rate = 0;
3338 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3339 ieee80211_input_mimo(ni, m, &rxs);
3340 ieee80211_free_node(ni);
3342 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3343 ieee80211_input_mimo_all(ic, m, &rxs);
/*
 * Process a single-frame TX response: report success/failure (and the
 * retry count) to the net80211 rate-control module.  Asserts that the
 * response covers exactly one frame; aggregation is handled elsewhere.
 */
3349 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3350 struct iwm_node *in)
3352 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3353 struct ieee80211_node *ni = &in->in_ni;
3354 struct ieee80211vap *vap = ni->ni_vap;
3355 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3356 int failack = tx_resp->failure_frame;
3358 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3360 /* Update rate control statistics. */
3361 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3363 (int) le16toh(tx_resp->status.status),
3364 (int) le16toh(tx_resp->status.sequence),
3365 tx_resp->frame_count,
3366 tx_resp->bt_kill_count,
3367 tx_resp->failure_rts,
3368 tx_resp->failure_frame,
3369 le32toh(tx_resp->initial_rate),
3370 (int) le16toh(tx_resp->wireless_media_time));
3372 if (status != IWM_TX_STATUS_SUCCESS &&
3373 status != IWM_TX_STATUS_DIRECT_DONE) {
3374 ieee80211_ratectl_tx_complete(vap, ni,
3375 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3378 ieee80211_ratectl_tx_complete(vap, ni,
3379 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
/*
 * TX completion notification: locate the ring slot named by the packet
 * header (qid/idx), feed the status to rate control, unmap and free
 * the mbuf via ieee80211_tx_complete, and re-open the TX queue when it
 * drains below the low-water mark.
 */
3385 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3387 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3388 int idx = cmd_hdr->idx;
3389 int qid = cmd_hdr->qid;
3390 struct iwm_tx_ring *ring = &sc->txq[qid];
3391 struct iwm_tx_data *txd = &ring->data[idx];
3392 struct iwm_node *in = txd->in;
3393 struct mbuf *m = txd->m;
3396 KASSERT(txd->done == 0, ("txd not done"));
3397 KASSERT(txd->in != NULL, ("txd without node"));
3398 KASSERT(txd->m != NULL, ("txd without mbuf"));
/* A completion means the device is alive: reset the watchdog. */
3400 sc->sc_tx_timer = 0;
3402 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3404 /* Unmap and free mbuf. */
3405 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3406 bus_dmamap_unload(ring->data_dmat, txd->map);
3408 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3409 "free txd %p, in %p\n", txd, txd->in);
/* Frees the mbuf and drops the node reference taken at TX time. */
3414 ieee80211_tx_complete(&in->in_ni, m, status);
3416 if (--ring->queued < IWM_TX_RING_LOMARK) {
3417 sc->qfullmsk &= ~(1 << ring->qid);
3418 if (sc->qfullmsk == 0) {
3429 * Process a "command done" firmware notification. This is where we wakeup
3430 * processes waiting for a synchronous command completion.
/*
 * Only packets on the command queue are handled; the waiter sleeping
 * on the ring descriptor (in the synchronous send path) is woken, any
 * mbuf-mapped command buffer is released, and command-in-flight state
 * is cleared once the queue empties.
 */
3434 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3436 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3437 struct iwm_tx_data *data;
3439 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3440 return; /* Not a command ack. */
3443 data = &ring->data[pkt->hdr.idx];
3445 /* If the command was mapped in an mbuf, free it. */
3446 if (data->m != NULL) {
3447 bus_dmamap_sync(ring->data_dmat, data->map,
3448 BUS_DMASYNC_POSTWRITE);
3449 bus_dmamap_unload(ring->data_dmat, data->map);
/* Wake the thread blocked in the synchronous command path. */
3453 wakeup(&ring->desc[pkt->hdr.idx]);
/* Completions should arrive in ring order; warn when they do not. */
3455 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3456 device_printf(sc->sc_dev,
3457 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3458 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3459 /* XXX call iwm_force_nmi() */
3462 KKASSERT(ring->queued > 0);
3464 if (ring->queued == 0)
3465 iwm_pcie_clear_cmd_in_flight(sc);
3470 * necessary only for block ack mode
/*
 * Write the byte-count table entry for TX scheduler slot (qid, idx):
 * station id in the high bits, adjusted frame length in the low bits.
 * Low indices are duplicated at IWM_TFD_QUEUE_SIZE_MAX + idx, matching
 * the hardware's wrap-around read pattern.
 */
3473 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3476 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3479 scd_bc_tbl = sc->sched_dma.vaddr;
3481 len += 8; /* magic numbers came naturally from paris */
/* With the DW byte-count-table capability, length is in dwords. */
3482 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3483 len = roundup(len, 4) / 4;
3485 w_val = htole16(sta_id << 12 | len);
3487 /* Update TX scheduler. */
3488 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3489 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3490 BUS_DMASYNC_PREWRITE);
3492 /* I really wonder what this is ?!? */
3493 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3494 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3495 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3496 BUS_DMASYNC_PREWRITE);
3502 * Take an 802.11 (non-n) rate, find the relevant rate
3503 * table entry. return the index into in_ridx[].
3505 * The caller then uses that index back into in_ridx
3506 * to figure out the rate index programmed /into/
3507 * the firmware for this given node.
3510 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
/* Linear scan of the node's programmed rate set for a matching rate. */
3516 for (i = 0; i < nitems(in->in_ridx); i++) {
3517 r = iwm_rates[in->in_ridx[i]].rate;
3521 /* XXX Return the first */
3522 /* XXX TODO: have it return the /lowest/ */
3527 * Fill in the rate related information for a transmit command.
/*
 * Choose the TX rate for a frame and encode it into the tx command:
 * data frames consult the rate-control table (in->in_ridx), everything
 * else falls back to the lowest rate for the current PHY mode (OFDM on
 * 11a, CCK otherwise).  Returns the chosen iwm_rate table entry so the
 * caller can use it for radiotap.
 */
3529 static const struct iwm_rate *
3530 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3531 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3533 struct ieee80211com *ic = &sc->sc_ic;
3534 struct ieee80211_node *ni = &in->in_ni;
3535 const struct iwm_rate *rinfo;
3536 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3537 int ridx, rate_flags;
3539 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3540 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3543 * XXX TODO: everything about the rate selection here is terrible!
3546 if (type == IEEE80211_FC0_TYPE_DATA) {
3548 /* for data frames, use RS table */
3549 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3550 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3551 ridx = in->in_ridx[i];
3553 /* This is the index into the programmed table */
3554 tx->initial_rate_index = i;
/* Tell fw to use the per-station rate table, not the cmd rate. */
3555 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3556 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3557 "%s: start with i=%d, txrate %d\n",
3558 __func__, i, iwm_rates[ridx].rate);
3561 * For non-data, use the lowest supported rate for the given
3564 * Note: there may not be any rate control information available.
3565 * This driver currently assumes if we're transmitting data
3566 * frames, use the rate control table. Grr.
3568 * XXX TODO: use the configured rate for the traffic type!
3569 * XXX TODO: this should be per-vap, not curmode; as we later
3570 * on we'll want to handle off-channel stuff (eg TDLS).
3572 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3574 * XXX this assumes the mode is either 11a or not 11a;
3575 * definitely won't work for 11n.
3577 ridx = IWM_RIDX_OFDM;
3579 ridx = IWM_RIDX_CCK;
3583 rinfo = &iwm_rates[ridx];
3585 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3588 !! (IWM_RIDX_IS_CCK(ridx))
3591 /* XXX TODO: hard-coded TX antenna? */
3592 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3593 if (IWM_RIDX_IS_CCK(ridx))
3594 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3595 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
/*
 * Transmit one frame: build the firmware TX command for mbuf 'm'
 * destined to node 'ni' on access category 'ac', load the mbuf for DMA,
 * fill the TFD descriptor and kick the ring's write pointer.
 * NOTE(review): listing has gaps; error paths / frees are partly hidden.
 */
3602 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3604 struct ieee80211com *ic = &sc->sc_ic;
3605 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3606 struct iwm_node *in = IWM_NODE(ni);
3607 struct iwm_tx_ring *ring;
3608 struct iwm_tx_data *data;
3609 struct iwm_tfd *desc;
3610 struct iwm_device_cmd *cmd;
3611 struct iwm_tx_cmd *tx;
3612 struct ieee80211_frame *wh;
3613 struct ieee80211_key *k = NULL;
3614 #if !defined(__DragonFly__)
3617 const struct iwm_rate *rinfo;
3620 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3623 int i, totlen, error, pad;
3625 wh = mtod(m, struct ieee80211_frame *);
3626 hdrlen = ieee80211_anyhdrsize(wh);
3627 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* Pick the TX ring for this access category and its current slot. */
3629 ring = &sc->txq[ac];
3630 desc = &ring->desc[ring->cur];
3631 memset(desc, 0, sizeof(*desc));
3632 data = &ring->data[ring->cur];
3634 /* Fill out iwm_tx_cmd to send to the firmware */
3635 cmd = &ring->cmd[ring->cur];
3636 cmd->hdr.code = IWM_TX_CMD;
3638 cmd->hdr.qid = ring->qid;
3639 cmd->hdr.idx = ring->cur;
3641 tx = (void *)cmd->data;
3642 memset(tx, 0, sizeof(*tx));
/* Rate selection; rinfo is also used for the radiotap header below. */
3644 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3646 /* Encrypt the frame if need be. */
3647 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3648 /* Retrieve key for TX && do software encryption. */
3649 k = ieee80211_crypto_encap(ni, m);
3654 /* 802.11 header may have moved. */
3655 wh = mtod(m, struct ieee80211_frame *);
3658 if (ieee80211_radiotap_active_vap(vap)) {
3659 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3662 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3663 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3664 tap->wt_rate = rinfo->rate;
3666 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3667 ieee80211_radiotap_tx(vap, m);
3671 totlen = m->m_pkthdr.len;
/* Unicast frames want an ACK; protect long data frames with RTS/CTS. */
3674 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3675 flags |= IWM_TX_CMD_FLG_ACK;
3678 if (type == IEEE80211_FC0_TYPE_DATA
3679 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3680 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3681 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
/* Multicast and non-data frames go through the auxiliary station. */
3684 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3685 type != IEEE80211_FC0_TYPE_DATA)
3686 tx->sta_id = sc->sc_aux_sta.sta_id;
3688 tx->sta_id = IWM_STATION_ID;
/* Power-management frame timeout depends on the management subtype. */
3690 if (type == IEEE80211_FC0_TYPE_MGT) {
3691 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3693 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3694 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3695 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3696 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3697 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3699 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3702 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3706 /* First segment length must be a multiple of 4. */
3707 flags |= IWM_TX_CMD_FLG_MH_PAD;
3708 pad = 4 - (hdrlen & 3);
3712 tx->driver_txop = 0;
3713 tx->next_frame_len = 0;
3715 tx->len = htole16(totlen);
3716 tx->tid_tspec = tid;
3717 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3719 /* Set physical address of "scratch area". */
3720 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3721 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3723 /* Copy 802.11 header in TX command. */
3724 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3726 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3729 tx->tx_flags |= htole32(flags);
3731 /* Trim 802.11 header. */
/* DMA-map the payload; DragonFly's loader can defragment in one call. */
3733 #if defined(__DragonFly__)
3734 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3735 segs, IWM_MAX_SCATTER - 2,
3736 &nsegs, BUS_DMA_NOWAIT);
3738 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3739 segs, &nsegs, BUS_DMA_NOWAIT);
3742 #if defined(__DragonFly__)
3743 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3748 if (error != EFBIG) {
3749 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3754 /* Too many DMA segments, linearize mbuf. */
3755 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3757 device_printf(sc->sc_dev,
3758 "%s: could not defrag mbuf\n", __func__);
3764 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3765 segs, &nsegs, BUS_DMA_NOWAIT);
3767 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3778 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3779 "sending txd %p, in %p\n", data, data->in);
3780 KASSERT(data->in != NULL, ("node is NULL"));
3782 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3783 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3784 ring->qid, ring->cur, totlen, nsegs,
3785 le32toh(tx->tx_flags),
3786 le32toh(tx->rate_n_flags),
3787 tx->initial_rate_index
3790 /* Fill TX descriptor. */
/* TB0/TB1 cover the command header + TX cmd + 802.11 header (+pad). */
3791 desc->num_tbs = 2 + nsegs;
3793 desc->tbs[0].lo = htole32(data->cmd_paddr);
3794 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3796 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3797 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3798 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3799 + hdrlen + pad - TB0_SIZE) << 4);
3801 /* Other DMA segments are for data payload. */
3802 for (i = 0; i < nsegs; i++) {
3804 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3805 desc->tbs[i+2].hi_n_len = \
3806 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3807 | ((seg->ds_len) << 4);
/* Flush payload, command and descriptor memory before the doorbell. */
3810 bus_dmamap_sync(ring->data_dmat, data->map,
3811 BUS_DMASYNC_PREWRITE);
3812 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3813 BUS_DMASYNC_PREWRITE);
3814 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3815 BUS_DMASYNC_PREWRITE);
3818 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
/* Advance ring and tell hardware about the new write pointer. */
3822 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3823 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3825 /* Mark TX ring as full if we reach a certain threshold. */
3826 if (++ring->queued > IWM_TX_RING_HIMARK) {
3827 sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 raw-xmit entry point.  Refuses transmission when the
 * hardware is not initialized; otherwise hands the frame to iwm_tx().
 * NOTE(review): both the params==NULL and params!=NULL branches call
 * iwm_tx() identically — the raw-parameters path is not implemented.
 */
3834 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3835 const struct ieee80211_bpf_params *params)
3837 struct ieee80211com *ic = ni->ni_ic;
3838 struct iwm_softc *sc = ic->ic_softc;
3841 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3842 "->%s begin\n", __func__);
3844 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3846 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3847 "<-%s not RUNNING\n", __func__);
3853 if (params == NULL) {
3854 error = iwm_tx(sc, m, ni, 0);
3856 error = iwm_tx(sc, m, ni, 0);
/* Arm the TX watchdog (seconds); cleared when frames complete. */
3858 sc->sc_tx_timer = 5;
3869 * Note that there are transports that buffer frames before they reach
3870 * the firmware. This means that after flush_tx_path is called, the
3871 * queue might not be empty. The race-free way to handle this is to:
3872 * 1) set the station as draining
3873 * 2) flush the Tx path
3874 * 3) wait for the transport queues to be empty
/*
 * Ask the firmware to flush the TX path for the queues in 'tfd_msk'.
 * 'flags' selects sync/async command submission.
 */
3877 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3880 struct iwm_tx_path_flush_cmd flush_cmd = {
3881 .queues_ctl = htole32(tfd_msk),
3882 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3885 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3886 sizeof(flush_cmd), &flush_cmd);
3888 device_printf(sc->sc_dev,
3889 "Flushing tx queue failed: %d\n", ret);
/* Send an ADD_STA command and return its completion status in *status. */
3894 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3895 struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3897 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3901 /* send station add/update command to firmware */
/*
 * Build and send an ADD_STA command for the BSS station.
 * 'update' selects modify (1) vs. add (0) semantics in the firmware.
 */
3903 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3905 struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3909 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3911 add_sta_cmd.sta_id = IWM_STATION_ID;
3912 add_sta_cmd.mac_id_n_color
3913 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3914 IWM_DEFAULT_COLOR));
/* Enable the TFD queue for every WME access category. */
3917 for (ac = 0; ac < WME_NUM_AC; ac++) {
3918 add_sta_cmd.tfd_queue_msk |=
3919 htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3921 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3923 add_sta_cmd.add_modify = update ? 1 : 0;
3924 add_sta_cmd.station_flags_msk
3925 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
/* All TIDs start TX-disabled; aggregation setup enables them later. */
3926 add_sta_cmd.tid_disable_tx = htole16(0xffff);
3928 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3930 status = IWM_ADD_STA_SUCCESS;
3931 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3936 case IWM_ADD_STA_SUCCESS:
3940 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the BSS station to the firmware (first-time add). */
3948 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3950 return iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update (modify) an already-added BSS station in the firmware. */
3954 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3956 return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an internal (non-BSS) station, e.g. the auxiliary scan station.
 * 'addr' may be NULL for broadcast-style internal stations.
 */
3960 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3961 const uint8_t *addr, uint16_t mac_id, uint16_t color)
3963 struct iwm_mvm_add_sta_cmd_v7 cmd;
3967 memset(&cmd, 0, sizeof(cmd));
3968 cmd.sta_id = sta->sta_id;
3969 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3971 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3972 cmd.tid_disable_tx = htole16(0xffff);
3975 IEEE80211_ADDR_COPY(cmd.addr, addr);
3977 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3982 case IWM_ADD_STA_SUCCESS:
3983 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3984 "%s: Internal station added.\n", __func__);
3987 device_printf(sc->sc_dev,
3988 "%s: Add internal station failed, status=0x%x\n",
/*
 * Set up the auxiliary station used for scanning: enable its TX queue
 * and register it with the firmware.  On failure the aux station state
 * is cleared.
 */
3997 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4001 sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4002 sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4004 ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4008 ret = iwm_mvm_add_int_sta_common(sc,
4009 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4012 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send airtime quotas to the firmware.  The session's
 * IWM_MVM_MAX_QUOTA fragments are divided equally among active
 * bindings; 'in' may be NULL (no active binding → all quotas zero).
 */
4017 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4019 struct iwm_time_quota_cmd cmd;
4020 int i, idx, ret, num_active_macs, quota, quota_rem;
4021 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4022 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4025 memset(&cmd, 0, sizeof(cmd));
4027 /* currently, PHY ID == binding ID */
4029 id = in->in_phyctxt->id;
4030 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4031 colors[id] = in->in_phyctxt->color;
4038 * The FW's scheduling session consists of
4039 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4040 * equally between all the bindings that require quota
4042 num_active_macs = 0;
4043 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4044 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4045 num_active_macs += n_ifs[i];
4050 if (num_active_macs) {
4051 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4052 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4055 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4059 cmd.quotas[idx].id_and_color =
4060 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4062 if (n_ifs[i] <= 0) {
4063 cmd.quotas[idx].quota = htole32(0);
4064 cmd.quotas[idx].max_duration = htole32(0);
4066 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4067 cmd.quotas[idx].max_duration = htole32(0);
4072 /* Give the remainder of the session to the first binding */
4073 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4075 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4078 device_printf(sc->sc_dev,
4079 "%s: Failed to send quota: %d\n", __func__, ret);
4084 * ieee80211 routines
4088 * Change to AUTH state in 80211 state machine. Roughly matches what
4089 * Linux does in bss_info_changed().
/*
 * Program the firmware for authentication: smart-fifo config,
 * multicast filter, MAC/PHY contexts, binding, station, and finally a
 * session-protection time event so the FW stays on-channel.
 */
4092 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4094 struct ieee80211_node *ni;
4095 struct iwm_node *in;
4096 struct iwm_vap *iv = IWM_VAP(vap);
4101 * XXX i have a feeling that the vap node is being
4102 * freed from underneath us. Grr.
/* Hold a reference on the bss node for the duration of this call. */
4104 ni = ieee80211_ref_node(vap->iv_bss);
4106 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4107 "%s: called; vap=%p, bss ni=%p\n",
4114 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4118 error = iwm_allow_mcast(vap, sc);
4120 device_printf(sc->sc_dev,
4121 "%s: failed to set multicast\n", __func__);
4126 * This is where it deviates from what Linux does.
4128 * Linux iwlwifi doesn't reset the nic each time, nor does it
4129 * call ctxt_add() here. Instead, it adds it during vap creation,
4130 * and always does a mac_ctx_changed().
4132 * The openbsd port doesn't attempt to do that - it reset things
4133 * at odd states and does the add here.
4135 * So, until the state handling is fixed (ie, we never reset
4136 * the NIC except for a firmware failure, which should drag
4137 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4138 * contexts that are required), let's do a dirty hack here.
/* Contexts already uploaded: issue "changed" updates ... */
4140 if (iv->is_uploaded) {
4141 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4142 device_printf(sc->sc_dev,
4143 "%s: failed to update MAC\n", __func__);
4146 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4147 in->in_ni.ni_chan, 1, 1)) != 0) {
4148 device_printf(sc->sc_dev,
4149 "%s: failed update phy ctxt\n", __func__);
4152 in->in_phyctxt = &sc->sc_phyctxt[0];
4154 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4155 device_printf(sc->sc_dev,
4156 "%s: binding update cmd\n", __func__);
4159 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4160 device_printf(sc->sc_dev,
4161 "%s: failed to update sta\n", __func__);
/* ... otherwise add all contexts from scratch. */
4165 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4166 device_printf(sc->sc_dev,
4167 "%s: failed to add MAC\n", __func__);
4170 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4171 in->in_ni.ni_chan, 1, 1)) != 0) {
4172 device_printf(sc->sc_dev,
4173 "%s: failed add phy ctxt!\n", __func__);
4177 in->in_phyctxt = &sc->sc_phyctxt[0];
4179 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4180 device_printf(sc->sc_dev,
4181 "%s: binding add cmd\n", __func__);
4184 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4185 device_printf(sc->sc_dev,
4186 "%s: failed to add sta\n", __func__);
4192 * Prevent the FW from wandering off channel during association
4193 * by "protecting" the session with a time event.
4195 /* XXX duration is in units of TU, not MS */
4196 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4197 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
/* Release the node reference taken above. */
4202 ieee80211_free_node(ni);
/*
 * Move to ASSOC: refresh the firmware's station entry and MAC context
 * with the (now known) association state.
 */
4207 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4209 struct iwm_node *in = IWM_NODE(vap->iv_bss);
4212 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4213 device_printf(sc->sc_dev,
4214 "%s: failed to update STA\n", __func__);
4219 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4220 device_printf(sc->sc_dev,
4221 "%s: failed to update MAC\n", __func__);
/*
 * Tear down the association: drain and flush TX, then fully stop the
 * device (a heavier hammer than the "proper" context-removal sequence,
 * which is documented below as freezing the hardware).
 */
4229 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4234 * Ok, so *technically* the proper set of calls for going
4235 * from RUN back to SCAN is:
4237 * iwm_mvm_power_mac_disable(sc, in);
4238 * iwm_mvm_mac_ctxt_changed(sc, in);
4239 * iwm_mvm_rm_sta(sc, in);
4240 * iwm_mvm_update_quotas(sc, NULL);
4241 * iwm_mvm_mac_ctxt_changed(sc, in);
4242 * iwm_mvm_binding_remove_vif(sc, in);
4243 * iwm_mvm_mac_ctxt_remove(sc, in);
4245 * However, that freezes the device not matter which permutations
4246 * and modifications are attempted. Obviously, this driver is missing
4247 * something since it works in the Linux driver, but figuring out what
4248 * is missing is a little more complicated. Now, since we're going
4249 * back to nothing anyway, we'll just do a complete device reset.
4250 * Up your's, device!
4253 * Just using 0xf for the queues mask is fine as long as we only
4254 * get here from RUN state.
4257 mbufq_drain(&sc->sc_snd);
4258 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4260 * We seem to get away with just synchronously sending the
4261 * IWM_TXPATH_FLUSH command.
4263 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4264 iwm_stop_device(sc);
/* NOTE(review): code below appears unreachable after the reset above. */
4273 iwm_mvm_power_mac_disable(sc, in);
4275 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4276 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4280 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4281 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4284 error = iwm_mvm_rm_sta(sc, in);
4286 iwm_mvm_update_quotas(sc, NULL);
4287 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4288 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4291 iwm_mvm_binding_remove_vif(sc, in);
4293 iwm_mvm_mac_ctxt_remove(sc, in);
/* Allocate a zeroed driver node (iwm_node embeds ieee80211_node). */
4299 static struct ieee80211_node *
4300 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4302 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4303 M_INTWAIT | M_ZERO);
/*
 * Build the firmware link-quality (LQ) command for a node: map the
 * node's negotiated 802.11 rates to hardware rate indices (highest
 * rate first) and fill the LQ rate table, padding the tail with the
 * lowest rate.  Not 11n-aware.
 */
4307 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4309 struct ieee80211_node *ni = &in->in_ni;
4310 struct iwm_lq_cmd *lq = &in->in_lq;
4311 int nrates = ni->ni_rates.rs_nrates;
4312 int i, ridx, tab = 0;
4315 if (nrates > nitems(lq->rs_table)) {
4316 device_printf(sc->sc_dev,
4317 "%s: node supports %d rates, driver handles "
4318 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4322 device_printf(sc->sc_dev,
4323 "%s: node supports 0 rates, odd!\n", __func__);
4328 * XXX .. and most of iwm_node is not initialised explicitly;
4329 * it's all just 0x0 passed to the firmware.
4332 /* first figure out which rates we should support */
4333 /* XXX TODO: this isn't 11n aware /at all/ */
/* -1 in every slot marks "no rate"; filled below. */
4334 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4335 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4336 "%s: nrates=%d\n", __func__, nrates);
4339 * Loop over nrates and populate in_ridx from the highest
4340 * rate to the lowest rate. Remember, in_ridx[] has
4341 * IEEE80211_RATE_MAXSIZE entries!
4343 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4344 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4346 /* Map 802.11 rate to HW rate index. */
4347 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4348 if (iwm_rates[ridx].rate == rate)
4350 if (ridx > IWM_RIDX_MAX) {
4351 device_printf(sc->sc_dev,
4352 "%s: WARNING: device rate for %d not found!\n",
4355 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4356 "%s: rate: i: %d, rate=%d, ridx=%d\n",
4361 in->in_ridx[i] = ridx;
4365 /* then construct a lq_cmd based on those */
4366 memset(lq, 0, sizeof(*lq));
4367 lq->sta_id = IWM_STATION_ID;
4369 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4370 if (ni->ni_flags & IEEE80211_NODE_HT)
4371 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4374 * are these used? (we don't do SISO or MIMO)
4375 * need to set them to non-zero, though, or we get an error.
4377 lq->single_stream_ant_msk = 1;
4378 lq->dual_stream_ant_msk = 1;
4381 * Build the actual rate selection table.
4382 * The lowest bits are the rates. Additionally,
4383 * CCK needs bit 9 to be set. The rest of the bits
4384 * we add to the table select the tx antenna
4385 * Note that we add the rates in the highest rate first
4386 * (opposite of ni_rates).
4389 * XXX TODO: this should be looping over the min of nrates
4390 * and LQ_MAX_RETRY_NUM. Sigh.
4392 for (i = 0; i < nrates; i++) {
/* Use the first valid TX antenna reported by the hardware. */
4396 txant = iwm_mvm_get_valid_tx_ant(sc);
4397 nextant = 1<<(ffs(txant)-1);
4401 * Map the rate id into a rate index into
4402 * our hardware table containing the
4403 * configuration to use for this rate.
4405 ridx = in->in_ridx[i];
4406 tab = iwm_rates[ridx].plcp;
4407 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4408 if (IWM_RIDX_IS_CCK(ridx))
4409 tab |= IWM_RATE_MCS_CCK_MSK;
4410 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4411 "station rate i=%d, rate=%d, hw=%x\n",
4412 i, iwm_rates[ridx].rate, tab);
4413 lq->rs_table[i] = htole32(tab);
4415 /* then fill the rest with the lowest possible rate */
4416 for (i = nrates; i < nitems(lq->rs_table); i++) {
4417 KASSERT(tab != 0, ("invalid tab"));
4418 lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: let net80211 process the change; restart
 * the interface if it is running and a reset is required (ENETRESET).
 */
4423 iwm_media_change(struct ifnet *ifp)
4425 struct ieee80211vap *vap = ifp->if_softc;
4426 struct ieee80211com *ic = vap->iv_ic;
4427 struct iwm_softc *sc = ic->ic_softc;
4430 error = ieee80211_media_change(ifp);
4431 if (error != ENETRESET)
4435 if (ic->ic_nrunning > 0) {
/*
 * net80211 state-machine hook.  Performs driver-side work for each
 * transition (auth/assoc/run setup, teardown on INIT), forcing a
 * detour through INIT when moving backwards from RUN, then chains to
 * the saved net80211 newstate handler.
 */
4445 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4447 struct iwm_vap *ivp = IWM_VAP(vap);
4448 struct ieee80211com *ic = vap->iv_ic;
4449 struct iwm_softc *sc = ic->ic_softc;
4450 struct iwm_node *in;
4453 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4454 "switching state %s -> %s\n",
4455 ieee80211_state_name[vap->iv_state],
4456 ieee80211_state_name[nstate]);
4457 IEEE80211_UNLOCK(ic);
/* Leaving SCAN: stop the scan LED blink. */
4460 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4461 iwm_led_blink_stop(sc);
4463 /* disable beacon filtering if we're hopping out of RUN */
4464 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4465 iwm_mvm_disable_beacon_filter(sc);
4467 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4470 if (nstate == IEEE80211_S_INIT) {
4473 error = ivp->iv_newstate(vap, nstate, arg);
4474 IEEE80211_UNLOCK(ic);
4476 iwm_release(sc, NULL);
4483 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4484 * above then the card will be completely reinitialized,
4485 * so the driver must do everything necessary to bring the card
4486 * from INIT to SCAN.
4488 * Additionally, upon receiving deauth frame from AP,
4489 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4490 * state. This will also fail with this driver, so bring the FSM
4491 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4493 * XXX TODO: fix this for FreeBSD!
4495 if (nstate == IEEE80211_S_SCAN ||
4496 nstate == IEEE80211_S_AUTH ||
4497 nstate == IEEE80211_S_ASSOC) {
4498 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4499 "Force transition to INIT; MGT=%d\n", arg);
4502 /* Always pass arg as -1 since we can't Tx right now. */
4504 * XXX arg is just ignored anyway when transitioning
4505 * to IEEE80211_S_INIT.
4507 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4508 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4509 "Going INIT->SCAN\n");
4510 nstate = IEEE80211_S_SCAN;
4511 IEEE80211_UNLOCK(ic);
4517 case IEEE80211_S_INIT:
4520 case IEEE80211_S_AUTH:
4521 if ((error = iwm_auth(vap, sc)) != 0) {
4522 device_printf(sc->sc_dev,
4523 "%s: could not move to auth state: %d\n",
4529 case IEEE80211_S_ASSOC:
4530 if ((error = iwm_assoc(vap, sc)) != 0) {
4531 device_printf(sc->sc_dev,
4532 "%s: failed to associate: %d\n", __func__,
4538 case IEEE80211_S_RUN:
4540 struct iwm_host_cmd cmd = {
4542 .len = { sizeof(in->in_lq), },
4543 .flags = IWM_CMD_SYNC,
4546 /* Update the association state, now we have it all */
4547 /* (eg associd comes in at this point */
4548 error = iwm_assoc(vap, sc);
4550 device_printf(sc->sc_dev,
4551 "%s: failed to update association state: %d\n",
/* RUN: enable power/beacon-filter/quotas, program LQ rates. */
4557 in = IWM_NODE(vap->iv_bss);
4558 iwm_mvm_power_mac_update_mode(sc, in);
4559 iwm_mvm_enable_beacon_filter(sc, in);
4560 iwm_mvm_update_quotas(sc, in);
4561 iwm_setrates(sc, in);
4563 cmd.data[0] = &in->in_lq;
4564 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4565 device_printf(sc->sc_dev,
4566 "%s: IWM_LQ_CMD failed\n", __func__);
4569 iwm_mvm_led_enable(sc);
4579 return (ivp->iv_newstate(vap, nstate, arg));
/* Deferred-task callback: notify net80211 that the scan completed. */
4583 iwm_endscan_cb(void *arg, int pending)
4585 struct iwm_softc *sc = arg;
4586 struct ieee80211com *ic = &sc->sc_ic;
4588 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4592 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4596 * Aging and idle timeouts for the different possible scenarios
4597 * in default configuration
/* Smart-fifo timeout table, indexed [scenario][timeout type]. */
4599 static const uint32_t
4600 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4602 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4603 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4606 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4607 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4610 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4611 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4614 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4615 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4618 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4619 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4624 * Aging and idle timeouts for the different possible scenarios
4625 * in single BSS MAC configuration.
/* Smart-fifo timeout table for the associated (single-BSS) case. */
4627 static const uint32_t
4628 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4630 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4631 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4634 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4635 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4638 htole32(IWM_SF_MCAST_AGING_TIMER),
4639 htole32(IWM_SF_MCAST_IDLE_TIMER)
4642 htole32(IWM_SF_BA_AGING_TIMER),
4643 htole32(IWM_SF_BA_IDLE_TIMER)
4646 htole32(IWM_SF_TX_RE_AGING_TIMER),
4647 htole32(IWM_SF_TX_RE_IDLE_TIMER)
/*
 * Fill a smart-fifo config command.  'ni' non-NULL means associated:
 * watermark is chosen from the AP's HT/MIMO capabilities and the
 * single-BSS timeout table is used; NULL uses defaults.
 */
4652 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4653 struct ieee80211_node *ni)
4655 int i, j, watermark;
4657 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4660 * If we are in association flow - check antenna configuration
4661 * capabilities of the AP station, and choose the watermark accordingly.
4664 if (ni->ni_flags & IEEE80211_NODE_HT) {
/* rxmcs[1]/[2] nonzero ⇒ AP supports 2/3 spatial streams. */
4666 if (ni->ni_rxmcs[2] != 0)
4667 watermark = IWM_SF_W_MARK_MIMO3;
4668 else if (ni->ni_rxmcs[1] != 0)
4669 watermark = IWM_SF_W_MARK_MIMO2;
4672 watermark = IWM_SF_W_MARK_SISO;
4674 watermark = IWM_SF_W_MARK_LEGACY;
4676 /* default watermark value for unassociated mode. */
4678 watermark = IWM_SF_W_MARK_MIMO2;
4680 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4682 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4683 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4684 sf_cmd->long_delay_timeouts[i][j] =
4685 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4690 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4691 sizeof(iwm_sf_full_timeout));
4693 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4694 sizeof(iwm_sf_full_timeout_def));
/*
 * Send a smart-fifo configuration command for the requested state
 * (INIT_OFF uses defaults, FULL_ON uses the bss node's capabilities).
 */
4699 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4701 struct ieee80211com *ic = &sc->sc_ic;
4702 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4703 struct iwm_sf_cfg_cmd sf_cmd = {
4704 .state = htole32(IWM_SF_FULL_ON),
/* 8000-family firmware wants dummy notifications disabled. */
4708 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4709 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4711 switch (new_state) {
4713 case IWM_SF_INIT_OFF:
4714 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4716 case IWM_SF_FULL_ON:
4717 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4720 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4721 "Invalid state: %d. not sending Smart Fifo cmd\n",
4726 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4727 sizeof(sf_cmd), &sf_cmd);
/* Send the initial Bluetooth-coexistence configuration to firmware. */
4732 iwm_send_bt_init_conf(struct iwm_softc *sc)
4734 struct iwm_bt_coex_cmd bt_cmd;
4736 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4737 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4739 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
/*
 * Send an MCC (mobile country code / regulatory domain) update for
 * 'alpha2' (two-letter country code) and parse the firmware response,
 * which exists in v1 and v2 layouts depending on firmware capability.
 */
4744 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4746 struct iwm_mcc_update_cmd mcc_cmd;
4747 struct iwm_host_cmd hcmd = {
4748 .id = IWM_MCC_UPDATE_CMD,
4749 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4750 .data = { &mcc_cmd },
4754 struct iwm_rx_packet *pkt;
4755 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4756 struct iwm_mcc_update_resp *mcc_resp;
4760 int resp_v2 = isset(sc->sc_enabled_capa,
4761 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4763 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
/* Pack the two ASCII letters into a 16-bit MCC value. */
4764 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4765 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4766 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4767 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4769 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4772 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4774 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4776 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4777 "send MCC update to FW with '%c%c' src = %d\n",
4778 alpha2[0], alpha2[1], mcc_cmd.source_id);
4780 ret = iwm_send_cmd(sc, &hcmd);
4785 pkt = hcmd.resp_pkt;
4787 /* Extract MCC response */
4789 mcc_resp = (void *)pkt->data;
4790 mcc = mcc_resp->mcc;
4791 n_channels = le32toh(mcc_resp->n_channels);
4793 mcc_resp_v1 = (void *)pkt->data;
4794 mcc = mcc_resp_v1->mcc;
4795 n_channels = le32toh(mcc_resp_v1->n_channels);
4798 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4800 mcc = 0x3030; /* "00" - world */
4802 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4803 "regulatory domain '%c%c' (%d channels available)\n",
4804 mcc >> 8, mcc & 0xff, n_channels);
/* Response buffer must be released (IWM_CMD_WANT_SKB was set). */
4806 iwm_free_resp(sc, &hcmd);
/* Set the thermal-throttling TX backoff value in the firmware. */
4812 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4814 struct iwm_host_cmd cmd = {
4815 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4816 .len = { sizeof(uint32_t), },
4817 .data = { &backoff, },
4820 if (iwm_send_cmd(sc, &cmd) != 0) {
4821 device_printf(sc->sc_dev,
4822 "failed to change thermal tx backoff\n");
/*
 * Full hardware bring-up sequence: start HW, run the INIT ucode, then
 * restart with the regular firmware and configure BT coex, antennas,
 * PHY DB, aux station, PHY contexts, power, regulatory, scan and the
 * per-AC TX queues.  On failure the device is stopped.
 */
4827 iwm_init_hw(struct iwm_softc *sc)
4829 struct ieee80211com *ic = &sc->sc_ic;
4832 if ((error = iwm_start_hw(sc)) != 0) {
4833 kprintf("iwm_start_hw: failed %d\n", error);
4837 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4838 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4843 * should stop and start HW since that INIT
4846 iwm_stop_device(sc);
4847 if ((error = iwm_start_hw(sc)) != 0) {
4848 device_printf(sc->sc_dev, "could not initialize hardware\n");
4852 /* omstart, this time with the regular firmware */
4853 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4855 device_printf(sc->sc_dev, "could not load firmware\n");
4859 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4860 device_printf(sc->sc_dev, "bt init conf failed\n");
4864 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4866 device_printf(sc->sc_dev, "antenna config failed\n");
4870 /* Send phy db control command and then phy db calibration */
4871 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4874 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4875 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4879 /* Add auxiliary station for scanning */
4880 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4881 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4885 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4887 * The channel used here isn't relevant as it's
4888 * going to be overwritten in the other flows.
4889 * For now use the first channel we have.
4891 if ((error = iwm_mvm_phy_ctxt_add(sc,
4892 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4896 /* Initialize tx backoffs to the minimum. */
4897 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4898 iwm_mvm_tt_tx_backoff(sc, 0);
4900 error = iwm_mvm_power_update_device(sc);
/* "ZZ" requests the current/default regulatory domain (LAR). */
4904 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4905 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4909 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4910 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4914 /* Enable Tx queues. */
4915 for (ac = 0; ac < WME_NUM_AC; ac++) {
4916 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4917 iwm_mvm_ac_to_tx_fifo[ac]);
4922 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4923 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4930 iwm_stop_device(sc);
4934 /* Allow multicast from our BSSID. */
/*
 * Program the firmware multicast filter to pass frames from the
 * current BSSID.  Allocates a zeroed command, sends it synchronously,
 * and frees it.
 */
4936 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4938 struct ieee80211_node *ni = vap->iv_bss;
4939 struct iwm_mcast_filter_cmd *cmd;
/* Command size must be 4-byte aligned for the firmware interface. */
4943 size = roundup(sizeof(*cmd), 4);
4944 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4947 cmd->filter_own = 1;
4951 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4953 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4954 IWM_CMD_SYNC, size, cmd);
4955 kfree(cmd, M_DEVBUF);
/*
 * Bring the interface up: run the HW init sequence, mark the softc
 * initialized and start the watchdog.  No-op if already initialized.
 */
4965 iwm_init(struct iwm_softc *sc)
4969 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4972 sc->sc_generation++;
4973 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4975 if ((error = iwm_init_hw(sc)) != 0) {
4976 kprintf("iwm_init_hw failed %d\n", error);
4982 * Ok, firmware loaded and we are jogging
4984 sc->sc_flags |= IWM_FLAG_HW_INITED;
4985 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 transmit hook: queue the mbuf on the driver send queue
 * (rejecting when HW is not initialized); iwm_start() drains it.
 */
4989 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4991 struct iwm_softc *sc;
4997 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5001 error = mbufq_enqueue(&sc->sc_snd, m);
/*
 * NOTE(review): excerpted listing — declarations of 'm' and 'ac' and the
 * mbuf-requeue path are on elided lines; 'ac' presumably comes from the
 * frame's WME category — confirm against the full source.
 */
5012 * Dequeue packets from sendq and call send.
5015 iwm_start(struct iwm_softc *sc)
5017 struct ieee80211_node *ni;
5021 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
/* Drain the send queue while no TX ring is full (qfullmsk == 0). */
5022 while (sc->qfullmsk == 0 &&
5023 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
/* Node reference was stashed in rcvif by the enqueue path. */
5024 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5025 if (iwm_tx(sc, m, ni, ac) != 0) {
5026 if_inc_counter(ni->ni_vap->iv_ifp,
5027 IFCOUNTER_OERRORS, 1);
5028 ieee80211_free_node(ni);
/* Re-arm the TX watchdog: 15 watchdog ticks until timeout. */
5031 sc->sc_tx_timer = 15;
5033 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/*
 * Stop the device: clear run-state flags, cancel LED blinking and the TX
 * watchdog, then power the hardware down.
 */
5037 iwm_stop(struct iwm_softc *sc)
5040 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5041 sc->sc_flags |= IWM_FLAG_STOPPED;
/* Invalidate callbacks belonging to the previous generation. */
5042 sc->sc_generation++;
5043 iwm_led_blink_stop(sc);
5044 sc->sc_tx_timer = 0;
5045 iwm_stop_device(sc);
5046 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
/*
 * Per-second watchdog callout: detect a stalled transmitter.
 * NOTE(review): excerpted listing — the recovery path after "device
 * timeout" is on elided lines.
 */
5050 iwm_watchdog(void *arg)
5052 struct iwm_softc *sc = arg;
5054 if (sc->sc_tx_timer > 0) {
5055 if (--sc->sc_tx_timer == 0) {
5056 device_printf(sc->sc_dev, "device timeout\n");
/* Output-error accounting differs between DragonFly and FreeBSD. */
5061 #if defined(__DragonFly__)
5062 ++sc->sc_ic.ic_oerrors;
5064 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/* Re-arm ourselves for the next second. */
5069 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_parent hook: bring the device up or down to match the
 * number of running VAPs. NOTE(review): excerpted listing — the
 * iwm_init()/iwm_stop() calls are on elided lines.
 */
5073 iwm_parent(struct ieee80211com *ic)
5075 struct iwm_softc *sc = ic->ic_softc;
5079 if (ic->ic_nrunning > 0) {
5080 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5084 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5088 ieee80211_start_all(ic);
5092 * The interrupt side of things
5096 * error dumping routines are from iwlwifi/mvm/utils.c
5100 * Note: This structure is read from the device with IO accesses,
5101 * and the reading already does the endian conversion. As it is
5102 * read with uint32_t-sized accesses, any members with a different size
5103 * need to be ordered correctly though!
/*
 * LMAC firmware error-log layout, read out of device memory by
 * iwm_nic_error(). Field order must match the firmware exactly.
 * NOTE(review): the continuation text of the isr0..isr4 and
 * lmpm_pmg_sel/u_timestamp comments is on elided lines of this listing.
 */
5105 struct iwm_error_event_table {
5106 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5107 uint32_t error_id; /* type of error */
5108 uint32_t trm_hw_status0; /* TRM HW status */
5109 uint32_t trm_hw_status1; /* TRM HW status */
5110 uint32_t blink2; /* branch link */
5111 uint32_t ilink1; /* interrupt link */
5112 uint32_t ilink2; /* interrupt link */
5113 uint32_t data1; /* error-specific data */
5114 uint32_t data2; /* error-specific data */
5115 uint32_t data3; /* error-specific data */
5116 uint32_t bcon_time; /* beacon timer */
5117 uint32_t tsf_low; /* network timestamp function timer */
5118 uint32_t tsf_hi; /* network timestamp function timer */
5119 uint32_t gp1; /* GP1 timer register */
5120 uint32_t gp2; /* GP2 timer register */
5121 uint32_t fw_rev_type; /* firmware revision type */
5122 uint32_t major; /* uCode version major */
5123 uint32_t minor; /* uCode version minor */
5124 uint32_t hw_ver; /* HW Silicon version */
5125 uint32_t brd_ver; /* HW board version */
5126 uint32_t log_pc; /* log program counter */
5127 uint32_t frame_ptr; /* frame pointer */
5128 uint32_t stack_ptr; /* stack pointer */
5129 uint32_t hcmd; /* last host command header */
5130 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5132 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5134 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5136 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5138 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5140 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5141 uint32_t wait_event; /* wait event() caller address */
5142 uint32_t l2p_control; /* L2pControlField */
5143 uint32_t l2p_duration; /* L2pDurationField */
5144 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5145 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5146 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5148 uint32_t u_timestamp; /* indicate when the date and time of the
5150 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5151 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5154 * UMAC error struct - relevant starting from family 8000 chip.
5155 * Note: This structure is read from the device with IO accesses,
5156 * and the reading already does the endian conversion. As it is
5157 * read with u32-sized accesses, any members with a different size
5158 * need to be ordered correctly though!
/*
 * UMAC firmware error-log layout (family 8000+), read by
 * iwm_nic_umac_error(). NOTE(review): the closing "} __packed;" of this
 * struct is on an elided line of this listing.
 */
5160 struct iwm_umac_error_event_table {
5161 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5162 uint32_t error_id; /* type of error */
5163 uint32_t blink1; /* branch link */
5164 uint32_t blink2; /* branch link */
5165 uint32_t ilink1; /* interrupt link */
5166 uint32_t ilink2; /* interrupt link */
5167 uint32_t data1; /* error-specific data */
5168 uint32_t data2; /* error-specific data */
5169 uint32_t data3; /* error-specific data */
5170 uint32_t umac_major;
5171 uint32_t umac_minor;
5172 uint32_t frame_pointer; /* core register 27*/
5173 uint32_t stack_pointer; /* core register 28 */
5174 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5175 uint32_t nic_isr_pref; /* ISR status register */
/* Offsets/sizes (in bytes) used when walking the firmware error log. */
5178 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5179 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
/*
 * Map firmware error_id values to human-readable names for the error
 * dump. NOTE(review): the struct header lines of this table are elided
 * in this listing. The final "ADVANCED_SYSASSERT" entry is the
 * catch-all default used by iwm_desc_lookup().
 */
5185 } advanced_lookup[] = {
5186 { "NMI_INTERRUPT_WDG", 0x34 },
5187 { "SYSASSERT", 0x35 },
5188 { "UCODE_VERSION_MISMATCH", 0x37 },
5189 { "BAD_COMMAND", 0x38 },
5190 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5191 { "FATAL_ERROR", 0x3D },
5192 { "NMI_TRM_HW_ERR", 0x46 },
5193 { "NMI_INTERRUPT_TRM", 0x4C },
5194 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5195 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5196 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5197 { "NMI_INTERRUPT_HOST", 0x66 },
5198 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5199 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5200 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5201 { "ADVANCED_SYSASSERT", 0 },
/*
 * Translate a firmware error number to its symbolic name. The loop
 * deliberately stops one short of the table end so that a miss falls
 * through to the last ("ADVANCED_SYSASSERT") catch-all entry.
 */
5205 iwm_desc_lookup(uint32_t num)
5209 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5210 if (advanced_lookup[i].num == num)
5211 return advanced_lookup[i].name;
5213 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5214 return advanced_lookup[i].name;
/*
 * Dump the UMAC error-event table from device memory to the console.
 * NOTE(review): excerpted listing — braces/returns are on elided lines.
 */
5218 iwm_nic_umac_error(struct iwm_softc *sc)
5220 struct iwm_umac_error_event_table table;
5223 base = sc->umac_error_event_table;
/* Firmware-supplied pointer sanity check: valid logs live >= 0x800000. */
5225 if (base < 0x800000) {
5226 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
/* Length is passed in 32-bit words, not bytes. */
5231 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5232 device_printf(sc->sc_dev, "reading errlog failed\n");
5236 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5237 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5238 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5239 sc->sc_flags, table.valid)
5242 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5243 iwm_desc_lookup(table.error_id));
5244 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5245 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5246 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5248 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5250 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5251 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5252 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5253 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5254 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5255 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5256 table.frame_pointer);
5257 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5258 table.stack_pointer);
5259 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5260 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5261 table.nic_isr_pref);
5265 * Support for dumping the error log seemed like a good idea ...
5266 * but it's mostly hex junk and the only sensible thing is the
5267 * hw/ucode revision (which we know anyway). Since it's here,
5268 * I'll just leave it in, just in case e.g. the Intel guys want to
5269 * help us decipher some "ADVANCED_SYSASSERT" later.
/*
 * Dump the LMAC error-event table, then chain to the UMAC dump when
 * that table pointer is set. NOTE(review): excerpted listing —
 * braces/returns are on elided lines.
 */
5272 iwm_nic_error(struct iwm_softc *sc)
5274 struct iwm_error_event_table table;
5277 device_printf(sc->sc_dev, "dumping device error log\n");
5278 base = sc->error_event_table;
/* Same firmware-pointer sanity check as the UMAC variant. */
5279 if (base < 0x800000) {
5280 device_printf(sc->sc_dev,
5281 "Invalid error log pointer 0x%08x\n", base);
/* Length is passed in 32-bit words, not bytes. */
5285 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5286 device_printf(sc->sc_dev, "reading errlog failed\n");
5291 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5295 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5296 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5297 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5298 sc->sc_flags, table.valid);
5301 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5302 iwm_desc_lookup(table.error_id));
5303 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5304 table.trm_hw_status0);
5305 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5306 table.trm_hw_status1);
5307 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5308 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5309 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5310 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5311 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5312 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5313 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5314 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5315 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5316 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5317 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5318 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5320 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5321 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5322 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5323 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5324 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5325 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5326 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5327 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5328 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5329 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5330 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5331 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5332 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5333 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5334 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5335 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5336 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5337 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5338 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
/* Family 8000+ also keeps a UMAC log; dump it when present. */
5340 if (sc->umac_error_event_table)
5341 iwm_nic_umac_error(sc);
/* Advance the RX ring cursor, wrapping at IWM_RX_RING_COUNT. */
5345 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5348 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5349 * Basic structure from if_iwn
/*
 * Drain the RX ring and dispatch every firmware notification/response
 * packet to its handler. NOTE(review): excerpted listing — many break
 * statements, local declarations and closing braces are on elided
 * lines; only comments are added here.
 */
5352 iwm_notif_intr(struct iwm_softc *sc)
5354 struct ieee80211com *ic = &sc->sc_ic;
5357 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5358 BUS_DMASYNC_POSTREAD);
/* Hardware write pointer: index of the last closed receive buffer. */
5360 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5365 while (sc->rxq.cur != hw) {
5366 struct iwm_rx_ring *ring = &sc->rxq;
5367 struct iwm_rx_data *data = &ring->data[ring->cur];
5368 struct iwm_rx_packet *pkt;
5369 struct iwm_cmd_response *cresp;
5372 bus_dmamap_sync(ring->data_dmat, data->map,
5373 BUS_DMASYNC_POSTREAD);
5374 pkt = mtod(data->m, struct iwm_rx_packet *);
/* Strip the firmware-originated marker bit (see comment near 5622). */
5376 qid = pkt->hdr.qid & ~0x80;
5379 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5380 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5381 "rx packet qid=%d idx=%d type=%x %d %d\n",
5382 pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5385 * randomly get these from the firmware, no idea why.
5386 * they at least seem harmless, so just ignore them for now
5388 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5389 || pkt->len_n_flags == htole32(0x55550000))) {
/* Wake any synchronous waiter registered for this notification. */
5394 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5397 case IWM_REPLY_RX_PHY_CMD:
5398 iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5401 case IWM_REPLY_RX_MPDU_CMD:
5402 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5406 iwm_mvm_rx_tx_cmd(sc, pkt);
5409 case IWM_MISSED_BEACONS_NOTIFICATION: {
5410 struct iwm_missed_beacons_notif *resp;
5413 /* XXX look at mac_id to determine interface ID */
5414 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5416 resp = (void *)pkt->data;
5417 missed = le32toh(resp->consec_missed_beacons);
5419 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5420 "%s: MISSED_BEACON: mac_id=%d, "
5421 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5424 le32toh(resp->mac_id),
5425 le32toh(resp->consec_missed_beacons_since_last_rx),
5426 le32toh(resp->consec_missed_beacons),
5427 le32toh(resp->num_expected_beacons),
5428 le32toh(resp->num_recvd_beacons));
5434 /* XXX no net80211 locking? */
5435 if (vap->iv_state == IEEE80211_S_RUN &&
5436 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5437 if (missed > vap->iv_bmissthreshold) {
5438 /* XXX bad locking; turn into task */
5440 ieee80211_beacon_miss(ic);
5447 case IWM_MFUART_LOAD_NOTIFICATION:
5453 case IWM_CALIB_RES_NOTIF_PHY_DB:
5456 case IWM_STATISTICS_NOTIFICATION: {
5457 struct iwm_notif_statistics *stats;
5458 stats = (void *)pkt->data;
/* Cache the full statistics block and derive the noise floor. */
5459 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5460 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5463 case IWM_NVM_ACCESS_CMD:
5464 case IWM_MCC_UPDATE_CMD:
/* A synchronous sender is waiting on this (qid,idx) pair. */
5465 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5466 memcpy(sc->sc_cmd_resp,
5467 pkt, sizeof(sc->sc_cmd_resp));
5471 case IWM_MCC_CHUB_UPDATE_CMD: {
5472 struct iwm_mcc_chub_notif *notif;
5473 notif = (void *)pkt->data;
/* Decode the two-letter country code reported by firmware. */
5475 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5476 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5477 sc->sc_fw_mcc[2] = '\0';
5478 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5479 "fw source %d sent CC '%s'\n",
5480 notif->source_id, sc->sc_fw_mcc);
5483 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5484 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5485 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5486 struct iwm_dts_measurement_notif_v1 *notif;
5488 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5489 device_printf(sc->sc_dev,
5490 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5493 notif = (void *)pkt->data;
5494 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5495 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
/* Plain command acknowledgments: copy out for a waiting sender. */
5500 case IWM_PHY_CONFIGURATION_CMD:
5501 case IWM_TX_ANT_CONFIGURATION_CMD:
5503 case IWM_MAC_CONTEXT_CMD:
5504 case IWM_REPLY_SF_CFG_CMD:
5505 case IWM_POWER_TABLE_CMD:
5506 case IWM_PHY_CONTEXT_CMD:
5507 case IWM_BINDING_CONTEXT_CMD:
5508 case IWM_TIME_EVENT_CMD:
5509 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5510 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5511 case IWM_SCAN_ABORT_UMAC:
5512 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5513 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5514 case IWM_REPLY_BEACON_FILTERING_CMD:
5515 case IWM_MAC_PM_POWER_TABLE:
5516 case IWM_TIME_QUOTA_CMD:
5517 case IWM_REMOVE_STA:
5518 case IWM_TXPATH_FLUSH:
5520 case IWM_FW_PAGING_BLOCK_CMD:
5522 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5523 cresp = (void *)pkt->data;
5524 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5525 memcpy(sc->sc_cmd_resp,
5526 pkt, sizeof(*pkt)+sizeof(*cresp));
5531 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5534 case IWM_INIT_COMPLETE_NOTIF:
5537 case IWM_SCAN_OFFLOAD_COMPLETE: {
5538 struct iwm_periodic_scan_complete *notif;
5539 notif = (void *)pkt->data;
5541 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5542 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
/* Defer end-of-scan processing to the taskqueue. */
5543 ieee80211_runtask(ic, &sc->sc_es_task);
5548 case IWM_SCAN_ITERATION_COMPLETE: {
5549 struct iwm_lmac_scan_complete_notif *notif;
5550 notif = (void *)pkt->data;
5554 case IWM_SCAN_COMPLETE_UMAC: {
5555 struct iwm_umac_scan_complete *notif;
5556 notif = (void *)pkt->data;
5558 IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5559 "UMAC scan complete, status=0x%x\n",
5562 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5563 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5564 ieee80211_runtask(ic, &sc->sc_es_task);
5569 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5570 struct iwm_umac_scan_iter_complete_notif *notif;
5571 notif = (void *)pkt->data;
5573 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5574 "complete, status=0x%x, %d channels scanned\n",
5575 notif->status, notif->scanned_channels);
5579 case IWM_REPLY_ERROR: {
5580 struct iwm_error_resp *resp;
5581 resp = (void *)pkt->data;
5583 device_printf(sc->sc_dev,
5584 "firmware error 0x%x, cmd 0x%x\n",
5585 le32toh(resp->error_type),
5589 case IWM_TIME_EVENT_NOTIFICATION: {
5590 struct iwm_time_event_notif *notif;
5591 notif = (void *)pkt->data;
5593 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5594 "TE notif status = 0x%x action = 0x%x\n",
5595 notif->status, notif->action);
5598 case IWM_MCAST_FILTER_CMD:
5601 case IWM_SCD_QUEUE_CFG: {
5602 struct iwm_scd_txq_cfg_rsp *rsp;
5603 rsp = (void *)pkt->data;
5605 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5606 "queue cfg token=0x%x sta_id=%d "
5607 "tid=%d scd_queue=%d\n",
5608 rsp->token, rsp->sta_id, rsp->tid,
5614 device_printf(sc->sc_dev,
5615 "frame %d/%d %x UNHANDLED (this should "
5616 "not happen)\n", qid, idx,
5622 * Why test bit 0x80? The Linux driver:
5624 * There is one exception: uCode sets bit 15 when it
5625 * originates the response/notification, i.e. when the
5626 * response/notification is not a direct response to a
5627 * command sent by the driver. For example, uCode issues
5628 * IWM_REPLY_RX when it sends a received frame to the driver;
5629 * it is not a direct response to any driver command.
5631 * Ok, so since when is 7 == 15? Well, the Linux driver
5632 * uses a slightly different format for pkt->hdr, and "qid"
5633 * is actually the upper byte of a two-byte field.
5635 if (!(pkt->hdr.qid & (1 << 7))) {
5636 iwm_cmd_done(sc, pkt);
5643 * Tell the firmware what we have processed.
5644 * Seems like the hardware gets upset unless we align
5647 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5648 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
/*
 * Main interrupt handler. Reads the interrupt cause either from the ICT
 * table (when enabled) or directly from CSR_INT, then services each
 * cause bit. NOTE(review): excerpted listing — the function header line
 * and several braces/gotos are elided.
 */
5654 struct iwm_softc *sc = arg;
5659 #if defined(__DragonFly__)
/* Guard against interrupts arriving after detach (DragonFly only). */
5660 if (sc->sc_mem == NULL) {
5661 kprintf("iwm_intr: detached\n");
/* Mask all interrupts while we service this one. */
5666 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5668 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5669 uint32_t *ict = sc->ict_dma.vaddr;
5672 tmp = htole32(ict[sc->ict_cur]);
5677 * ok, there was something. keep plowing until we have all.
/* Consume ICT entries until a zero slot is reached. */
5682 ict[sc->ict_cur] = 0;
5683 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5684 tmp = htole32(ict[sc->ict_cur]);
5687 /* this is where the fun begins. don't ask */
5688 if (r1 == 0xffffffff)
5691 /* i am not expected to understand this */
/* Expand the compressed ICT bit layout into CSR_INT format. */
5694 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5696 r1 = IWM_READ(sc, IWM_CSR_INT);
5697 /* "hardware gone" (where, fishing?) */
5698 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5700 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5702 if (r1 == 0 && r2 == 0) {
/* Acknowledge the causes we are about to service. */
5706 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5708 /* Safely ignore these bits for debug checks below */
5709 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
/* Firmware SW error: dump state and restart the VAP. */
5711 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5713 struct ieee80211com *ic = &sc->sc_ic;
5714 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5719 /* Dump driver status (TX and RX rings) while we're here. */
5720 device_printf(sc->sc_dev, "driver status:\n");
5721 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5722 struct iwm_tx_ring *ring = &sc->txq[i];
5723 device_printf(sc->sc_dev,
5724 " tx ring %2d: qid=%-2d cur=%-3d "
5726 i, ring->qid, ring->cur, ring->queued);
5728 device_printf(sc->sc_dev,
5729 " rx ring: cur=%d\n", sc->rxq.cur);
5730 device_printf(sc->sc_dev,
5731 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5733 /* Don't stop the device; just do a VAP restart */
5737 kprintf("%s: null vap\n", __func__);
5741 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5742 "restarting\n", __func__, vap->iv_state);
5744 /* XXX TODO: turn this into a callout/taskqueue */
5745 ieee80211_restart_all(ic);
5749 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5750 handled |= IWM_CSR_INT_BIT_HW_ERR;
5751 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5757 /* firmware chunk loaded */
5758 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5759 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5760 handled |= IWM_CSR_INT_BIT_FH_TX;
/* Wakes the firmware-load path waiting in iwm_fw chunk upload. */
5761 sc->sc_fw_chunk_done = 1;
5765 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5766 handled |= IWM_CSR_INT_BIT_RF_KILL;
5767 if (iwm_check_rfkill(sc)) {
5768 device_printf(sc->sc_dev,
5769 "%s: rfkill switch, disabling interface\n",
5776 * The Linux driver uses periodic interrupts to avoid races.
5777 * We cargo-cult like it's going out of fashion.
5779 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5780 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5781 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5782 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5784 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5788 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5789 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5790 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5794 /* enable periodic interrupt, see above */
5795 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5796 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5797 IWM_CSR_INT_PERIODIC_ENA);
5800 if (__predict_false(r1 & ~handled))
5801 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5802 "%s: unhandled interrupts: %x\n", __func__, r1);
/* Re-enable the interrupts masked at function entry. */
5806 iwm_restore_interrupts(sc);
5813 * Autoconf glue-sniffing
/* PCI vendor/device IDs for the adapters this driver attaches to. */
5815 #define PCI_VENDOR_INTEL 0x8086
5816 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5817 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5818 #define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5819 #define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5820 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5821 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5822 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5823 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5824 #define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5825 #define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
/*
 * Device-ID -> configuration table consulted by iwm_probe() and
 * iwm_dev_check(). NOTE(review): the device-id member declaration and
 * the closing brace are on elided lines of this listing.
 */
5827 static const struct iwm_devices {
5829 const struct iwm_cfg *cfg;
5831 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5832 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5833 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5834 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5835 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5836 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5837 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5838 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5839 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5840 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
/*
 * PCI probe: match vendor/device against iwm_devices and set the
 * description from the matched config.
 */
5844 iwm_probe(device_t dev)
5848 for (i = 0; i < nitems(iwm_devices); i++) {
5849 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5850 pci_get_device(dev) == iwm_devices[i].device) {
5851 device_set_desc(dev, iwm_devices[i].cfg->name);
5852 return (BUS_PROBE_DEFAULT);
/*
 * Bind the softc's cfg pointer to the entry matching this PCI device
 * id; report an error for unknown adapters.
 */
5860 iwm_dev_check(device_t dev)
5862 struct iwm_softc *sc;
5866 sc = device_get_softc(dev);
5868 devid = pci_get_device(dev);
5869 for (i = 0; i < NELEM(iwm_devices); i++) {
5870 if (iwm_devices[i].device == devid) {
5871 sc->cfg = iwm_devices[i].cfg;
5875 device_printf(dev, "unknown adapter type\n");
/* PCI config-space offset of the retry-timeout register (see below). */
5880 #define PCI_CFG_RETRY_TIMEOUT 0x041
/*
 * PCI-level attach: disable retry timeout, enable bus mastering, map
 * the memory BAR, allocate the IRQ (MSI preferred) and hook the
 * interrupt handler. NOTE(review): excerpted listing — error labels
 * and some declarations are elided.
 */
5883 iwm_pci_attach(device_t dev)
5885 struct iwm_softc *sc;
5886 int count, error, rid;
5888 #if defined(__DragonFly__)
5892 sc = device_get_softc(dev);
5894 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5895 * PCI Tx retries from interfering with C3 CPU state */
5896 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5898 /* Enable bus-mastering and hardware bug workaround. */
5899 pci_enable_busmaster(dev);
5900 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5902 if (reg & PCIM_STATUS_INTxSTATE) {
5903 reg &= ~PCIM_STATUS_INTxSTATE;
5905 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5908 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5910 if (sc->sc_mem == NULL) {
5911 device_printf(sc->sc_dev, "can't map mem space\n");
5914 sc->sc_st = rman_get_bustag(sc->sc_mem);
5915 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5917 /* Install interrupt handler. */
5920 #if defined(__DragonFly__)
5921 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5922 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
/* FreeBSD path: try MSI; rid 0 means legacy INTx, which is shareable. */
5924 if (pci_alloc_msi(dev, &count) == 0)
5926 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5927 (rid != 0 ? 0 : RF_SHAREABLE));
5929 if (sc->sc_irq == NULL) {
5930 device_printf(dev, "can't map interrupt\n");
5933 #if defined(__DragonFly__)
5934 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5935 iwm_intr, sc, &sc->sc_ih,
5936 &wlan_global_serializer);
5938 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5939 NULL, iwm_intr, sc, &sc->sc_ih);
5941 if (sc->sc_ih == NULL) {
5942 device_printf(dev, "can't establish interrupt");
5943 #if defined(__DragonFly__)
5944 pci_release_msi(dev);
5948 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
/*
 * Undo iwm_pci_attach(): tear down the interrupt, release the IRQ and
 * memory BAR resources, and release MSI.
 */
5954 iwm_pci_detach(device_t dev)
5956 struct iwm_softc *sc = device_get_softc(dev);
5958 if (sc->sc_irq != NULL) {
5959 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5960 bus_release_resource(dev, SYS_RES_IRQ,
5961 rman_get_rid(sc->sc_irq), sc->sc_irq);
5962 pci_release_msi(dev);
5963 #if defined(__DragonFly__)
5967 if (sc->sc_mem != NULL) {
5968 bus_release_resource(dev, SYS_RES_MEMORY,
5969 rman_get_rid(sc->sc_mem), sc->sc_mem);
5970 #if defined(__DragonFly__)
/*
 * Device attach: initialize synchronization/callouts, notification
 * machinery, PCI resources, DMA rings, and net80211 capabilities, then
 * defer firmware load to iwm_preinit() via a config intrhook.
 * NOTE(review): excerpted listing — error labels, some declarations and
 * closing braces are elided.
 */
5979 iwm_attach(device_t dev)
5981 struct iwm_softc *sc = device_get_softc(dev);
5982 struct ieee80211com *ic = &sc->sc_ic;
5987 sc->sc_attached = 1;
5989 mbufq_init(&sc->sc_snd, ifqmaxlen);
5990 #if defined(__DragonFly__)
5991 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5993 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5995 callout_init(&sc->sc_led_blink_to);
5996 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5998 sc->sc_notif_wait = iwm_notification_wait_init(sc);
5999 if (sc->sc_notif_wait == NULL) {
6000 device_printf(dev, "failed to init notification wait struct\n");
6005 sc->sc_phy_db = iwm_phy_db_init(sc);
6006 if (!sc->sc_phy_db) {
6007 device_printf(dev, "Cannot init phy_db\n");
6012 error = iwm_pci_attach(dev);
/* Sentinel: no synchronous command response is pending. */
6016 sc->sc_wantresp = -1;
6018 /* Check device type */
6019 error = iwm_dev_check(dev);
6023 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6025 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6026 * changed, and now the revision step also includes bit 0-1 (no more
6027 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6028 * in the old format.
6030 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6031 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6032 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6034 if (iwm_prepare_card_hw(sc) != 0) {
6035 device_printf(dev, "could not initialize hardware\n");
6039 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6044 * In order to recognize C step the driver should read the
6045 * chip version id located at the AUX bus MISC address.
6047 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6048 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6051 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6052 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6053 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6056 device_printf(sc->sc_dev,
6057 "Failed to wake up the nic\n");
6061 if (iwm_nic_lock(sc)) {
6062 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6063 hw_step |= IWM_ENABLE_WFPM;
6064 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6065 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6066 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6068 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6069 (IWM_SILICON_C_STEP << 2);
6072 device_printf(sc->sc_dev, "Failed to lock the nic\n");
6077 /* special-case 7265D, it has the same PCI IDs. */
6078 if (sc->cfg == &iwm7265_cfg &&
6079 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6080 sc->cfg = &iwm7265d_cfg;
6083 /* Allocate DMA memory for firmware transfers. */
6084 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6085 device_printf(dev, "could not allocate memory for firmware\n");
6089 /* Allocate "Keep Warm" page. */
6090 if ((error = iwm_alloc_kw(sc)) != 0) {
6091 device_printf(dev, "could not allocate keep warm page\n");
6095 /* We use ICT interrupts */
6096 if ((error = iwm_alloc_ict(sc)) != 0) {
6097 device_printf(dev, "could not allocate ICT table\n");
6101 /* Allocate TX scheduler "rings". */
6102 if ((error = iwm_alloc_sched(sc)) != 0) {
6103 device_printf(dev, "could not allocate TX scheduler rings\n");
6107 /* Allocate TX rings */
6108 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6109 if ((error = iwm_alloc_tx_ring(sc,
6110 &sc->txq[txq_i], txq_i)) != 0) {
6112 "could not allocate TX ring %d\n",
6118 /* Allocate RX ring. */
6119 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6120 device_printf(dev, "could not allocate RX ring\n");
6124 /* Clear pending interrupts. */
6125 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6128 ic->ic_name = device_get_nameunit(sc->sc_dev);
6129 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6130 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6132 /* Set device capabilities. */
6135 IEEE80211_C_WPA | /* WPA/RSN */
6137 IEEE80211_C_SHSLOT | /* short slot time supported */
6138 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6139 // IEEE80211_C_BGSCAN /* capable of bg scanning */
6141 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6142 sc->sc_phyctxt[i].id = i;
6143 sc->sc_phyctxt[i].color = 0;
6144 sc->sc_phyctxt[i].ref = 0;
6145 sc->sc_phyctxt[i].channel = NULL;
6148 /* Default noise floor */
6152 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
/* Defer firmware load until interrupts work (runs iwm_preinit). */
6154 sc->sc_preinit_hook.ich_func = iwm_preinit;
6155 sc->sc_preinit_hook.ich_arg = sc;
6156 sc->sc_preinit_hook.ich_desc = "iwm";
6157 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6158 device_printf(dev, "config_intrhook_establish failed\n");
6163 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6164 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6165 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6168 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6169 "<-%s\n", __func__);
6173 /* Free allocated memory if something failed during attachment. */
6175 iwm_detach_local(sc, 0);
/*
 * Validate a MAC address: reject multicast addresses (low bit of the
 * first octet set) and the all-zero address.
 */
6181 iwm_is_valid_ether_addr(uint8_t *addr)
6183 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6185 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
/* WME/EDCA update hook — currently only logs that it was invoked. */
6192 iwm_update_edca(struct ieee80211com *ic)
6194 struct iwm_softc *sc = ic->ic_softc;
6196 device_printf(sc->sc_dev, "%s: called\n", __func__);
/*
 * Deferred attach (config intrhook): start the hardware, run the init
 * firmware once to read NVM/capabilities, then attach to net80211 and
 * install all ic_* callbacks. Tears down on failure.
 * NOTE(review): excerpted listing — some braces and error labels are
 * elided.
 */
6201 iwm_preinit(void *arg)
6203 struct iwm_softc *sc = arg;
6204 device_t dev = sc->sc_dev;
6205 struct ieee80211com *ic = &sc->sc_ic;
6208 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6209 "->%s\n", __func__);
6212 if ((error = iwm_start_hw(sc)) != 0) {
6213 device_printf(dev, "could not initialize hardware\n");
/* Run the init firmware image once, then power the device back down. */
6218 error = iwm_run_init_mvm_ucode(sc, 1);
6219 iwm_stop_device(sc);
6225 "hw rev 0x%x, fw ver %s, address %s\n",
6226 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6227 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6229 /* not all hardware can do 5GHz band */
6230 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6231 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6232 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6235 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6239 * At this point we've committed - if we fail to do setup,
6240 * we now also have to tear down the net80211 state.
6242 ieee80211_ifattach(ic);
6243 ic->ic_vap_create = iwm_vap_create;
6244 ic->ic_vap_delete = iwm_vap_delete;
6245 ic->ic_raw_xmit = iwm_raw_xmit;
6246 ic->ic_node_alloc = iwm_node_alloc;
6247 ic->ic_scan_start = iwm_scan_start;
6248 ic->ic_scan_end = iwm_scan_end;
6249 ic->ic_update_mcast = iwm_update_mcast;
6250 ic->ic_getradiocaps = iwm_init_channel_map;
6251 ic->ic_set_channel = iwm_set_channel;
6252 ic->ic_scan_curchan = iwm_scan_curchan;
6253 ic->ic_scan_mindwell = iwm_scan_mindwell;
6254 ic->ic_wme.wme_update = iwm_update_edca;
6255 ic->ic_parent = iwm_parent;
6256 ic->ic_transmit = iwm_transmit;
6257 iwm_radiotap_attach(sc);
6259 ieee80211_announce(ic);
6261 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6262 "<-%s\n", __func__);
6263 config_intrhook_disestablish(&sc->sc_preinit_hook);
/* Failure path: drop the intrhook and unwind attach state. */
6267 config_intrhook_disestablish(&sc->sc_preinit_hook);
6268 iwm_detach_local(sc, 0);
* Attach the interface to 802.11 radiotap.
/*
 * iwm_radiotap_attach - register the driver's TX/RX radiotap headers
 * with net80211 so packet capture tools can see PHY metadata.
 */
6275 iwm_radiotap_attach(struct iwm_softc *sc)
6277 struct ieee80211com *ic = &sc->sc_ic;
6279 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6280 "->%s begin\n", __func__);
6281 ieee80211_radiotap_attach(ic,
6282 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6283 IWM_TX_RADIOTAP_PRESENT,
6284 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6285 IWM_RX_RADIOTAP_PRESENT);
6286 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6287 "->%s end\n", __func__);
/*
 * iwm_vap_create - net80211 ic_vap_create callback.
 * Allocates and attaches a single VAP; the hardware/firmware model here
 * supports only one VAP at a time (hence the TAILQ_EMPTY guard).
 * The previous iv_newstate handler is saved in the iwm_vap so
 * iwm_newstate can chain to it.
 * NOTE(review): the failure return for the "only one at a time" case and
 * the final return of the vap pointer are elided from this excerpt.
 */
6290 static struct ieee80211vap *
6291 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6292 enum ieee80211_opmode opmode, int flags,
6293 const uint8_t bssid[IEEE80211_ADDR_LEN],
6294 const uint8_t mac[IEEE80211_ADDR_LEN])
6296 struct iwm_vap *ivp;
6297 struct ieee80211vap *vap;
6299 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
/* M_INTWAIT | M_ZERO: DragonFly allocator flags; struct comes back zeroed. */
6301 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6303 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6304 vap->iv_bmissthreshold = 10; /* override default */
6305 /* Override with driver methods. */
6306 ivp->iv_newstate = vap->iv_newstate;
6307 vap->iv_newstate = iwm_newstate;
6309 ieee80211_ratectl_init(vap);
6310 /* Complete setup. */
6311 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6313 ic->ic_opmode = opmode;
/*
 * iwm_vap_delete - net80211 ic_vap_delete callback.
 * Tears down rate control and net80211 state, then frees the iwm_vap
 * allocated in iwm_vap_create (reverse order of creation).
 */
6319 iwm_vap_delete(struct ieee80211vap *vap)
6321 struct iwm_vap *ivp = IWM_VAP(vap);
6323 ieee80211_ratectl_deinit(vap);
6324 ieee80211_vap_detach(vap);
6325 kfree(ivp, M_80211_VAP);
/*
 * iwm_scan_start - net80211 ic_scan_start callback.
 * Kicks off a firmware-offloaded scan, preferring the newer UMAC scan
 * API when the firmware advertises IWM_UCODE_TLV_CAPA_UMAC_SCAN, and
 * falling back to the LMAC scan otherwise.  On failure the net80211
 * scan is cancelled; on success the SCAN_RUNNING flag is set and the
 * LED starts blinking.
 * NOTE(review): locking calls and early-return lines appear elided from
 * this excerpt.
 */
6329 iwm_scan_start(struct ieee80211com *ic)
6331 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6332 struct iwm_softc *sc = ic->ic_softc;
6336 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6337 /* This should not be possible */
6338 device_printf(sc->sc_dev,
6339 "%s: Previous scan not completed yet\n", __func__);
6341 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6342 error = iwm_mvm_umac_scan(sc);
6344 error = iwm_mvm_lmac_scan(sc);
6346 device_printf(sc->sc_dev, "could not initiate scan\n");
/* Tell net80211 the scan is off so it doesn't wait forever. */
6348 ieee80211_cancel_scan(vap);
6350 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6351 iwm_led_blink_start(sc);
/*
 * iwm_scan_end - net80211 ic_scan_end callback.
 * Stops the scan LED blink (re-enabling the steady LED if we are
 * associated/RUN), asks the firmware to stop any scan still running,
 * and cancels a possibly-queued end-scan task to avoid racing the
 * next scan start.
 */
6357 iwm_scan_end(struct ieee80211com *ic)
6359 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6360 struct iwm_softc *sc = ic->ic_softc;
6363 iwm_led_blink_stop(sc);
6364 if (vap->iv_state == IEEE80211_S_RUN)
6365 iwm_mvm_led_enable(sc);
6366 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6368 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6369 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6372 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6373 iwm_mvm_scan_stop_wait(sc);
6378 * Make sure we don't race, if sc_es_task is still enqueued here.
6379 * This is to make sure that it won't call ieee80211_scan_done
6380 * when we have already started the next scan.
6382 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
/*
 * Empty/stub net80211 callbacks.  Only the signatures are visible in
 * this excerpt; their bodies appear elided.  Multicast filter updates,
 * manual channel changes and per-channel scan dwell control are all
 * handled by the firmware-offloaded scan machinery, so these hooks
 * presumably do nothing — confirm against the full source.
 */
6386 iwm_update_mcast(struct ieee80211com *ic)
6391 iwm_set_channel(struct ieee80211com *ic)
6396 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6401 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
/*
 * iwm_init_task - (re)initialization worker.
 * Serializes against other init/stop paths via the IWM_FLAG_BUSY flag:
 * sleeps on sc_flags until the flag clears, claims it, does the work,
 * then clears it and wakes any other waiter.  DragonFly uses
 * lksleep on the lockmgr lock; FreeBSD uses msleep on the mutex.
 * NOTE(review): the lock acquire/release and the iwm_init/iwm_stop call
 * in the ic_nrunning branch are elided from this excerpt.
 */
6407 iwm_init_task(void *arg1)
6409 struct iwm_softc *sc = arg1;
6412 while (sc->sc_flags & IWM_FLAG_BUSY) {
6413 #if defined(__DragonFly__)
6414 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6416 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6419 sc->sc_flags |= IWM_FLAG_BUSY;
6421 if (sc->sc_ic.ic_nrunning > 0)
6423 sc->sc_flags &= ~IWM_FLAG_BUSY;
6424 wakeup(&sc->sc_flags);
/*
 * iwm_resume - device_resume method.
 * Restores the PCI retry-timeout workaround, re-runs the init task,
 * clears any suspend-time scanning marker, and resumes all VAPs.
 */
6429 iwm_resume(device_t dev)
6431 struct iwm_softc *sc = device_get_softc(dev);
6435 * We disable the RETRY_TIMEOUT register (0x41) to keep
6436 * PCI Tx retries from interfering with C3 CPU state.
6438 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6439 iwm_init_task(device_get_softc(dev));
6442 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6443 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6449 ieee80211_resume_all(&sc->sc_ic);
/*
 * iwm_suspend - device_suspend method.
 * Remembers whether the interface was running (do_stop), suspends all
 * VAPs, and flags an in-progress scan so iwm_resume can clear it.
 * NOTE(review): the actual stop of the device when do_stop is set is
 * elided from this excerpt.
 */
6455 iwm_suspend(device_t dev)
6458 struct iwm_softc *sc = device_get_softc(dev);
6460 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6462 ieee80211_suspend_all(&sc->sc_ic);
6467 sc->sc_flags |= IWM_FLAG_SCANNING;
/*
 * iwm_detach_local - common teardown for detach and failed attach.
 * Releases everything iwm_attach/iwm_preinit set up, roughly in reverse
 * order: tasks and callouts, the device itself, net80211 state (only if
 * do_net80211 is set, i.e. ieee80211_ifattach actually ran), phy db,
 * NVM data, DMA rings and buffers, firmware image, PCI resources,
 * notification-wait machinery, the send queue, and finally the lock.
 * Guarded by sc_attached so it is safe to call more than once.
 */
6475 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6477 struct iwm_fw_info *fw = &sc->sc_fw;
6478 device_t dev = sc->sc_dev;
6481 if (!sc->sc_attached)
6483 sc->sc_attached = 0;
6485 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6487 callout_drain(&sc->sc_led_blink_to);
6488 callout_drain(&sc->sc_watchdog_to);
6489 iwm_stop_device(sc);
6491 ieee80211_ifdetach(&sc->sc_ic);
6494 iwm_phy_db_free(sc->sc_phy_db);
6495 sc->sc_phy_db = NULL;
6497 iwm_free_nvm_data(sc->nvm_data);
6499 /* Free descriptor rings */
6500 iwm_free_rx_ring(sc, &sc->rxq);
6501 for (i = 0; i < nitems(sc->txq); i++)
6502 iwm_free_tx_ring(sc, &sc->txq[i]);
6505 if (fw->fw_fp != NULL)
6506 iwm_fw_info_free(fw);
6508 /* Free scheduler */
6509 iwm_dma_contig_free(&sc->sched_dma);
6510 iwm_dma_contig_free(&sc->ict_dma);
6511 iwm_dma_contig_free(&sc->kw_dma);
6512 iwm_dma_contig_free(&sc->fw_dma);
6514 iwm_free_fw_paging(sc);
6516 /* Finished with the hardware - detach things */
6517 iwm_pci_detach(dev);
6519 if (sc->sc_notif_wait != NULL) {
6520 iwm_notification_wait_free(sc->sc_notif_wait);
6521 sc->sc_notif_wait = NULL;
6524 mbufq_drain(&sc->sc_snd);
6525 IWM_LOCK_DESTROY(sc);
/*
 * iwm_detach - device_detach method; full teardown including net80211
 * state (do_net80211 = 1).
 */
6531 iwm_detach(device_t dev)
6533 struct iwm_softc *sc = device_get_softc(dev);
6535 return (iwm_detach_local(sc, 1));
/*
 * Driver/kernel glue: newbus method table, driver descriptor, and module
 * registration on the PCI bus, with dependencies on the firmware loader,
 * the PCI bus code, and the net80211 (wlan) stack.
 */
6538 static device_method_t iwm_pci_methods[] = {
6539 /* Device interface */
6540 DEVMETHOD(device_probe, iwm_probe),
6541 DEVMETHOD(device_attach, iwm_attach),
6542 DEVMETHOD(device_detach, iwm_detach),
6543 DEVMETHOD(device_suspend, iwm_suspend),
6544 DEVMETHOD(device_resume, iwm_resume),
6549 static driver_t iwm_pci_driver = {
6552 sizeof (struct iwm_softc)
6555 static devclass_t iwm_devclass;
6557 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6558 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6559 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6560 MODULE_DEPEND(iwm, wlan, 1, 1, 1);