1 /* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
108 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
109 * changes to remove per-device network interface (DragonFly has not
110 * caught up to that yet on the WLAN side).
112 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
113 * malloc -> kmalloc (in particular, changing improper M_NOWAIT
114 * specifications to M_INTWAIT. We still don't
115 * understand why FreeBSD uses M_NOWAIT for
116 * critical must-not-fail kmalloc()s).
119 * (bug fix) memset in iwm_reset_rx_ring.
120 * (debug) added several kprintf()s on error
122 * wlan_serialize_enter()/exit() hacks (will be removable when we
123 * do the device netif removal).
124 * header file paths (DFly allows localized path specifications).
125 * minor header file differences.
127 * Comprehensive list of adjustments for DragonFly #ifdef'd:
128 * (safety) added register read-back serialization in iwm_reset_rx_ring().
130 * RUNNING and OACTIVE tests
131 * msleep -> iwmsleep (handle deadlocks due to dfly interrupt serializer)
132 * mtx -> lk (mtx functions -> lockmgr functions)
133 * callout differences
134 * taskqueue differences
135 * iwm_start() and ifq differences
136 * iwm_ioctl() differences
138 * bus_setup_intr() differences
139 * minor PCI config register naming differences
141 #include <sys/cdefs.h>
142 __FBSDID("$FreeBSD$");
144 #include <sys/param.h>
146 #include <sys/endian.h>
147 #include <sys/firmware.h>
148 #include <sys/kernel.h>
149 #include <sys/malloc.h>
150 #include <sys/mbuf.h>
151 #include <sys/mutex.h>
152 #include <sys/module.h>
153 #include <sys/proc.h>
154 #include <sys/rman.h>
155 #include <sys/socket.h>
156 #include <sys/sockio.h>
157 #include <sys/sysctl.h>
158 #include <sys/linker.h>
160 #include <machine/endian.h>
162 #include <bus/pci/pcivar.h>
163 #include <bus/pci/pcireg.h>
168 #include <net/if_var.h>
169 #include <net/if_arp.h>
170 #include <net/ethernet.h>
171 #include <net/if_dl.h>
172 #include <net/if_media.h>
173 #include <net/if_types.h>
174 #include <net/ifq_var.h>
176 #include <netinet/in.h>
177 #include <netinet/in_systm.h>
178 #include <netinet/if_ether.h>
179 #include <netinet/ip.h>
181 #include <netproto/802_11/ieee80211_var.h>
182 #include <netproto/802_11/ieee80211_regdomain.h>
183 #include <netproto/802_11/ieee80211_ratectl.h>
184 #include <netproto/802_11/ieee80211_radiotap.h>
186 #include "if_iwmreg.h"
187 #include "if_iwmvar.h"
188 #include "if_iwm_debug.h"
189 #include "if_iwm_util.h"
190 #include "if_iwm_binding.h"
191 #include "if_iwm_phy_db.h"
192 #include "if_iwm_mac_ctxt.h"
193 #include "if_iwm_phy_ctxt.h"
194 #include "if_iwm_time_event.h"
195 #include "if_iwm_power.h"
196 #include "if_iwm_scan.h"
197 #include "if_iwm_pcie_trans.h"
/*
 * Channel numbers the NVM may report: 14 2GHz channels followed by the
 * 5GHz channel set.  IWM_NUM_2GHZ_CHANNELS marks the 2GHz/5GHz split.
 * NOTE(review): this extract is missing lines (e.g. the closing brace);
 * code is kept byte-identical to the original extract.
 */
199 const uint8_t iwm_nvm_channels[] = {
201 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
203 	36, 40, 44 , 48, 52, 56, 60, 64,
204 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
205 	149, 153, 157, 161, 165
207 #define IWM_NUM_2GHZ_CHANNELS	14
/*
 * Rate table mapping 802.11 rates (in units of 500 kbps: 2 == 1 Mbps)
 * to the firmware PLCP codes.  CCK rates occupy indices 0..3, OFDM
 * rates start at IWM_RIDX_OFDM (4); the IWM_RIDX_* macros classify an
 * index accordingly.
 * NOTE(review): extract is missing lines (struct fields, closing brace);
 * code kept byte-identical.
 */
210  * XXX For now, there's simply a fixed set of rate table entries
211  * that are populated.
213 const struct iwm_rate {
217 	{   2,	IWM_RATE_1M_PLCP  },
218 	{   4,	IWM_RATE_2M_PLCP  },
219 	{  11,	IWM_RATE_5M_PLCP  },
220 	{  22,	IWM_RATE_11M_PLCP },
221 	{  12,	IWM_RATE_6M_PLCP  },
222 	{  18,	IWM_RATE_9M_PLCP  },
223 	{  24,	IWM_RATE_12M_PLCP },
224 	{  36,	IWM_RATE_18M_PLCP },
225 	{  48,	IWM_RATE_24M_PLCP },
226 	{  72,	IWM_RATE_36M_PLCP },
227 	{  96,	IWM_RATE_48M_PLCP },
228 	{ 108,	IWM_RATE_54M_PLCP },
230 #define IWM_RIDX_CCK	0
231 #define IWM_RIDX_OFDM	4
232 #define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
233 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
234 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
236 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
237 static int iwm_firmware_store_section(struct iwm_softc *,
239 const uint8_t *, size_t);
240 static int iwm_set_default_calib(struct iwm_softc *, const void *);
241 static void iwm_fw_info_free(struct iwm_fw_info *);
242 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
243 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
244 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
245 bus_size_t, bus_size_t);
246 static void iwm_dma_contig_free(struct iwm_dma_info *);
247 static int iwm_alloc_fwmem(struct iwm_softc *);
248 static void iwm_free_fwmem(struct iwm_softc *);
249 static int iwm_alloc_sched(struct iwm_softc *);
250 static void iwm_free_sched(struct iwm_softc *);
251 static int iwm_alloc_kw(struct iwm_softc *);
252 static void iwm_free_kw(struct iwm_softc *);
253 static int iwm_alloc_ict(struct iwm_softc *);
254 static void iwm_free_ict(struct iwm_softc *);
255 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
257 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
258 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
260 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
261 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
262 static void iwm_enable_interrupts(struct iwm_softc *);
263 static void iwm_restore_interrupts(struct iwm_softc *);
264 static void iwm_disable_interrupts(struct iwm_softc *);
265 static void iwm_ict_reset(struct iwm_softc *);
266 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
267 static void iwm_stop_device(struct iwm_softc *);
268 static void iwm_mvm_nic_config(struct iwm_softc *);
269 static int iwm_nic_rx_init(struct iwm_softc *);
270 static int iwm_nic_tx_init(struct iwm_softc *);
271 static int iwm_nic_init(struct iwm_softc *);
272 static void iwm_enable_txq(struct iwm_softc *, int, int);
273 static int iwm_post_alive(struct iwm_softc *);
274 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
275 uint16_t, uint8_t *, uint16_t *);
276 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
278 static void iwm_init_channel_map(struct iwm_softc *,
279 const uint16_t * const);
280 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281 const uint16_t *, const uint16_t *, uint8_t,
283 struct iwm_nvm_section;
284 static int iwm_parse_nvm_sections(struct iwm_softc *,
285 struct iwm_nvm_section *);
286 static int iwm_nvm_init(struct iwm_softc *);
287 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
288 const uint8_t *, uint32_t);
289 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
290 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
291 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
292 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
293 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
294 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
295 enum iwm_ucode_type);
296 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
297 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
298 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
299 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
300 struct iwm_rx_phy_info *);
301 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
302 struct iwm_rx_packet *,
303 struct iwm_rx_data *);
304 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
305 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
306 struct iwm_rx_data *);
307 static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
308 struct iwm_rx_packet *,
310 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
311 struct iwm_rx_data *);
312 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
314 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
317 static const struct iwm_rate *
318 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
319 struct ieee80211_frame *, struct iwm_tx_cmd *);
320 static int iwm_tx(struct iwm_softc *, struct mbuf *,
321 struct ieee80211_node *, int);
322 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
323 const struct ieee80211_bpf_params *);
324 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
325 struct iwm_mvm_add_sta_cmd_v5 *);
326 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
327 struct iwm_mvm_add_sta_cmd_v6 *,
329 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
331 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
332 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
333 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
334 struct iwm_int_sta *,
335 const uint8_t *, uint16_t, uint16_t);
336 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
337 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
338 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
339 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
340 static int iwm_release(struct iwm_softc *, struct iwm_node *);
341 static struct ieee80211_node *
342 iwm_node_alloc(struct ieee80211vap *,
343 const uint8_t[IEEE80211_ADDR_LEN]);
344 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
345 static int iwm_media_change(struct ifnet *);
346 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
347 static void iwm_endscan_cb(void *, int);
348 static int iwm_init_hw(struct iwm_softc *);
349 static void iwm_init(void *);
350 static void iwm_init_locked(struct iwm_softc *);
351 #if defined(__DragonFly__)
352 static void iwm_start(struct ifnet *, struct ifaltq_subque *);
354 static void iwm_start(struct ifnet *);
356 static void iwm_start_locked(struct ifnet *);
357 static void iwm_stop(struct ifnet *, int);
358 static void iwm_stop_locked(struct ifnet *);
359 static void iwm_watchdog(void *);
360 #if defined(__DragonFly__)
361 static int iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t, struct ucred *cred);
363 static int iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
367 iwm_desc_lookup(uint32_t);
368 static void iwm_nic_error(struct iwm_softc *);
370 static void iwm_notif_intr(struct iwm_softc *);
371 static void iwm_intr(void *);
372 static int iwm_attach(device_t);
373 static void iwm_preinit(void *);
374 static int iwm_detach_local(struct iwm_softc *sc, int);
375 static void iwm_init_task(void *);
376 static void iwm_radiotap_attach(struct iwm_softc *);
377 static struct ieee80211vap *
378 iwm_vap_create(struct ieee80211com *,
379 const char [IFNAMSIZ], int,
380 enum ieee80211_opmode, int,
381 const uint8_t [IEEE80211_ADDR_LEN],
382 const uint8_t [IEEE80211_ADDR_LEN]);
383 static void iwm_vap_delete(struct ieee80211vap *);
384 static void iwm_scan_start(struct ieee80211com *);
385 static void iwm_scan_end(struct ieee80211com *);
386 static void iwm_update_mcast(struct ifnet *);
387 static void iwm_set_channel(struct ieee80211com *);
388 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
389 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
390 static int iwm_detach(device_t);
392 #if defined(__DragonFly__)
393 static int iwm_msi_enable = 1;
395 TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);
398 * This is a hack due to the wlan_serializer deadlocking sleepers.
/*
 * iwmsleep(): DragonFly replacement for msleep().  If the caller holds
 * the wlan serializer, drop it around the lksleep() and re-acquire it
 * afterwards (re-taking the lock in the documented order) to avoid
 * deadlocking sleepers against the interrupt serializer hack described
 * above.  Returns the lksleep() error code.
 * NOTE(review): extract is missing lines (error declaration, else branch
 * brace, return); code kept byte-identical.
 */
400 int iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to);
403 iwmsleep(void *chan, struct lock *lk, int flags, const char *wmesg, int to)
407 	if (wlan_is_serialized()) {
408 		wlan_serialize_exit();
409 		error = lksleep(chan, lk, flags, wmesg, to);
		/* release lock before re-entering the serializer to keep
		 * lock ordering: serializer first, then the driver lock */
410 		lockmgr(lk, LK_RELEASE);
411 		wlan_serialize_enter();
412 		lockmgr(lk, LK_EXCLUSIVE);
414 		error = lksleep(chan, lk, flags, wmesg, to);
/*
 * Validate the firmware's crypto-scheme TLV.  Only sanity-checks the
 * declared length against the payload; nothing is stored because the
 * driver always uses software crypto (see comment below).
 * NOTE(review): extract is missing lines (braces, returns); code kept
 * byte-identical.
 */
426 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
428 	const struct iwm_fw_cscheme_list *l = (const void *)data;
430 	if (dlen < sizeof(*l) ||
431 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
434 	/* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware section (from a SEC_RT/SEC_INIT/SEC_WOWLAN TLV)
 * into sc->sc_fw.fw_sects[type].  The first 32 bits of the payload are
 * the device load offset; the remainder is the section data.  Rejects
 * invalid ucode types, undersized payloads, and section overflow.
 * NOTE(review): extract is missing lines (braces, error returns); code
 * kept byte-identical.
 */
440 iwm_firmware_store_section(struct iwm_softc *sc,
441     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
443 	struct iwm_fw_sects *fws;
444 	struct iwm_fw_onesect *fwone;
446 	if (type >= IWM_UCODE_TYPE_MAX)
448 	if (dlen < sizeof(uint32_t))
451 	fws = &sc->sc_fw.fw_sects[type];
452 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
455 	fwone = &fws->fw_sect[fws->fw_count];
457 	/* first 32bit are device load offset */
458 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
	/* remaining bytes are the section payload itself */
461 	fwone->fws_data = data + sizeof(uint32_t);
462 	fwone->fws_len = dlen - sizeof(uint32_t);
465 	fws->fw_totlen += fwone->fws_len;
470 /* iwlwifi: iwl-drv.c */
/*
 * Wire format of the IWM_UCODE_TLV_DEF_CALIB payload: a ucode type
 * (field not visible in this extract) plus default calibration
 * triggers.  NOTE(review): extract is missing fields/closing brace.
 */
471 struct iwm_tlv_calib_data {
473 	struct iwm_tlv_calib_ctrl calib;
/*
 * Store the default calibration flow/event triggers from a DEF_CALIB
 * TLV payload into sc->sc_default_calib[], indexed by the ucode type
 * named in the payload.  Rejects out-of-range ucode types with an
 * error message.
 * NOTE(review): extract is missing lines (braces, returns); code kept
 * byte-identical.
 */
477 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
479 	const struct iwm_tlv_calib_data *def_calib = data;
480 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
482 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
483 		device_printf(sc->sc_dev,
484 		    "Wrong ucode_type %u for default "
485 		    "calibration.\n", ucode_type);
489 	sc->sc_default_calib[ucode_type].flow_trigger =
490 	    def_calib->calib.flow_trigger;
491 	sc->sc_default_calib[ucode_type].event_trigger =
492 	    def_calib->calib.event_trigger;
/*
 * Release the raw firmware image back to firmware(9) and clear the
 * parsed section table.  fw_status is deliberately preserved so the
 * load/parse state machine in iwm_read_firmware() is not disturbed.
 * NOTE(review): extract is missing lines; code kept byte-identical.
 */
498 iwm_fw_info_free(struct iwm_fw_info *fw)
500 	firmware_put(fw->fw_rawdata, FIRMWARE_UNLOAD);
501 	fw->fw_rawdata = NULL;
503 	/* don't touch fw->fw_status */
504 	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image via firmware(9) and parse its TLV stream.
 *
 * Flow: wait for any in-progress load (sleeping on &sc->sc_fw), mark
 * INPROGRESS, fetch the image, verify the ucode magic, then walk the
 * TLVs, storing sections/capabilities/calibration defaults into the
 * softc.  On success fw_status becomes DONE; on failure the image is
 * freed and fw_status reset to NONE.
 * NOTE(review): this extract is missing many lines (returns, braces,
 * goto labels, wakeups); code kept byte-identical to the extract.
 */
508 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
510 	struct iwm_fw_info *fw = &sc->sc_fw;
511 	const struct iwm_tlv_ucode_header *uhdr;
512 	struct iwm_ucode_tlv tlv;
513 	enum iwm_ucode_tlv_type tlv_type;
514 	const struct firmware *fwp;
	/* fast path: regular firmware already parsed, nothing to do */
519 	if (fw->fw_status == IWM_FW_STATUS_DONE &&
520 	    ucode_type != IWM_UCODE_TYPE_INIT)
	/* serialize concurrent loads; sleep until the other loader is done */
523 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
524 #if defined(__DragonFly__)
525 		iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
527 		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
530 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
532 	if (fw->fw_rawdata != NULL)
533 		iwm_fw_info_free(fw);
536 	 * Load firmware into driver memory.
537 	 * fw_rawdata and fw_rawsize will be set.
540 	fwp = firmware_get(sc->sc_fwname);
542 		device_printf(sc->sc_dev,
543 		    "could not read firmware %s (error %d)\n",
544 		    sc->sc_fwname, error);
549 	fw->fw_rawdata = fwp->data;
550 	fw->fw_rawsize = fwp->datasize;
553 	 * Parse firmware contents
556 	uhdr = (const void *)fw->fw_rawdata;
	/* TLV-style images start with a zero word followed by the magic */
557 	if (*(const uint32_t *)fw->fw_rawdata != 0
558 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
559 		device_printf(sc->sc_dev, "invalid firmware %s\n",
565 	sc->sc_fwver = le32toh(uhdr->ver);
567 	len = fw->fw_rawsize - sizeof(*uhdr);
	/* walk the TLV stream; each entry is a header plus 4-byte-padded data */
569 	while (len >= sizeof(tlv)) {
571 		const void *tlv_data;
573 		memcpy(&tlv, data, sizeof(tlv));
574 		tlv_len = le32toh(tlv.length);
575 		tlv_type = le32toh(tlv.type);
582 			device_printf(sc->sc_dev,
583 			    "firmware too short: %zu bytes\n",
589 		switch ((int)tlv_type) {
590 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
591 			if (tlv_len < sizeof(uint32_t)) {
592 				device_printf(sc->sc_dev,
593 				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
599 			sc->sc_capa_max_probe_len
600 			    = le32toh(*(const uint32_t *)tlv_data);
601 			/* limit it to something sensible */
602 			if (sc->sc_capa_max_probe_len > (1<<16)) {
603 				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
604 				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
605 				    "ridiculous\n", __func__);
610 		case IWM_UCODE_TLV_PAN:
612 				device_printf(sc->sc_dev,
613 				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
619 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
621 		case IWM_UCODE_TLV_FLAGS:
622 			if (tlv_len < sizeof(uint32_t)) {
623 				device_printf(sc->sc_dev,
624 				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
631 			 * Apparently there can be many flags, but Linux driver
632 			 * parses only the first one, and so do we.
634 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
635 			 * Intentional or a bug?  Observations from
636 			 * current firmware file:
637 			 *  1) TLV_PAN is parsed first
638 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
639 			 * ==> this resets TLV_PAN to itself... hnnnk
641 			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
643 		case IWM_UCODE_TLV_CSCHEME:
644 			if ((error = iwm_store_cscheme(sc,
645 			    tlv_data, tlv_len)) != 0) {
646 				device_printf(sc->sc_dev,
647 				    "%s: iwm_store_cscheme(): returned %d\n",
653 		case IWM_UCODE_TLV_NUM_OF_CPU:
654 			if (tlv_len != sizeof(uint32_t)) {
655 				device_printf(sc->sc_dev,
656 				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
662 			if (le32toh(*(const uint32_t*)tlv_data) != 1) {
663 				device_printf(sc->sc_dev,
664 				    "%s: driver supports "
665 				    "only TLV_NUM_OF_CPU == 1",
671 		case IWM_UCODE_TLV_SEC_RT:
672 			if ((error = iwm_firmware_store_section(sc,
673 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
674 				device_printf(sc->sc_dev,
675 				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
681 		case IWM_UCODE_TLV_SEC_INIT:
682 			if ((error = iwm_firmware_store_section(sc,
683 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
684 				device_printf(sc->sc_dev,
685 				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
691 		case IWM_UCODE_TLV_SEC_WOWLAN:
692 			if ((error = iwm_firmware_store_section(sc,
693 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
694 				device_printf(sc->sc_dev,
695 				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
701 		case IWM_UCODE_TLV_DEF_CALIB:
702 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
703 				device_printf(sc->sc_dev,
704 				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
707 				    (int) sizeof(struct iwm_tlv_calib_data));
711 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
712 				device_printf(sc->sc_dev,
713 				    "%s: iwm_set_default_calib() failed: %d\n",
719 		case IWM_UCODE_TLV_PHY_SKU:
720 			if (tlv_len != sizeof(uint32_t)) {
722 				device_printf(sc->sc_dev,
723 				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
728 			sc->sc_fw_phy_config =
729 			    le32toh(*(const uint32_t *)tlv_data);
732 		case IWM_UCODE_TLV_API_CHANGES_SET:
733 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
734 			/* ignore, not used by current driver */
738 			device_printf(sc->sc_dev,
739 			    "%s: unknown firmware section %d, abort\n",
		/* TLV payloads are padded to 4-byte alignment in the stream */
745 		len -= roundup(tlv_len, 4);
746 		data += roundup(tlv_len, 4);
749 	KASSERT(error == 0, ("unhandled error"));
753 	device_printf(sc->sc_dev, "firmware parse error %d, "
754 	    "section type %d\n", error, tlv_type);
757 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
758 		device_printf(sc->sc_dev,
759 		    "device uses unsupported power ops\n");
	/* failure path: reset state and drop the image */
765 		fw->fw_status = IWM_FW_STATUS_NONE;
766 		if (fw->fw_rawdata != NULL)
767 			iwm_fw_info_free(fw);
769 		fw->fw_status = IWM_FW_STATUS_DONE;
776  * DMA resource routines
/*
 * bus_dmamap_load() callback: record the (single) segment's bus
 * address into the caller-supplied bus_addr_t.  Asserts nsegs == 1.
 * NOTE(review): extract is missing lines (e.g. the early-return on
 * error commonly found in this callback); code kept byte-identical.
 */
780 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
784 	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
785 	*(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a physically contiguous, 32-bit-addressable DMA buffer of
 * the given size and alignment: create a tag, allocate zeroed coherent
 * memory, load the map (address captured via iwm_dma_map_addr), and
 * pre-sync for writing.  On failure falls through to
 * iwm_dma_contig_free().  Separate tag-create calls for DragonFly vs
 * FreeBSD reflect their differing bus_dma_tag_create() signatures.
 * NOTE(review): extract is missing lines (error checks, returns,
 * labels); code kept byte-identical.
 */
789 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
790     bus_size_t size, bus_size_t alignment)
797 #if defined(__DragonFly__)
798 	error = bus_dma_tag_create(tag, alignment,
800 	    BUS_SPACE_MAXADDR_32BIT,
804 	    BUS_DMA_NOWAIT, &dma->tag);
806 	error = bus_dma_tag_create(tag, alignment,
807 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
808 	    1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
813 	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
814 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
818 	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
819 	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
823 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	/* error path: tear down whatever was created so far */
828 	iwm_dma_contig_free(dma);
/*
 * Tear down a DMA buffer created by iwm_dma_contig_alloc(): sync,
 * unload, free the memory, destroy the map, then destroy the tag.
 * Safe to call on a partially initialized iwm_dma_info (NULL checks).
 * NOTE(review): extract is missing lines (NULL-resets, closing
 * braces); code kept byte-identical.
 */
834 iwm_dma_contig_free(struct iwm_dma_info *dma)
836 	if (dma->map != NULL) {
837 		if (dma->vaddr != NULL) {
838 			bus_dmamap_sync(dma->tag, dma->map,
839 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
840 			bus_dmamap_unload(dma->tag, dma->map);
841 			bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
844 		bus_dmamap_destroy(dma->tag, dma->map);
847 	if (dma->tag != NULL) {
848 		bus_dma_tag_destroy(dma->tag);
854 /* fwmem is used to load firmware onto the card */
/* Allocate the firmware-upload DMA buffer (sc_fwdmasegsz bytes). */
856 iwm_alloc_fwmem(struct iwm_softc *sc)
858 	/* Must be aligned on a 16-byte boundary. */
859 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
860 	    sc->sc_fwdmasegsz, 16);
/* Release the firmware-upload DMA buffer. */
864 iwm_free_fwmem(struct iwm_softc *sc)
866 	iwm_dma_contig_free(&sc->fw_dma);
869 /* tx scheduler rings.  not used? */
/* Allocate the TX scheduler byte-count tables, one per TX queue. */
871 iwm_alloc_sched(struct iwm_softc *sc)
875 	/* TX scheduler rings must be aligned on a 1KB boundary. */
876 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
877 	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
/* Release the TX scheduler DMA memory. */
882 iwm_free_sched(struct iwm_softc *sc)
884 	iwm_dma_contig_free(&sc->sched_dma);
887 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
/* Allocate the 4 KB, 4 KB-aligned keep-warm page. */
889 iwm_alloc_kw(struct iwm_softc *sc)
891 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
/* Release the keep-warm page. */
895 iwm_free_kw(struct iwm_softc *sc)
897 	iwm_dma_contig_free(&sc->kw_dma);
900 /* interrupt cause table */
/* Allocate the ICT (interrupt cause table), aligned so the address
 * can be programmed as paddr >> IWM_ICT_PADDR_SHIFT (see iwm_ict_reset). */
902 iwm_alloc_ict(struct iwm_softc *sc)
904 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
905 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
/* Release the ICT DMA memory. */
909 iwm_free_ict(struct iwm_softc *sc)
911 	iwm_dma_contig_free(&sc->ict_dma);
/*
 * Allocate one RX ring: descriptor array (one uint32_t per entry,
 * 256-byte aligned), status area (16-byte aligned), a per-buffer DMA
 * tag, and IWM_RX_RING_COUNT receive buffers via iwm_rx_addbuf().
 * On any failure jumps to `fail`, which frees everything allocated.
 * NOTE(review): extract is missing lines (declarations, error checks,
 * returns); code kept byte-identical.
 */
915 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
922 	/* Allocate RX descriptors (256-byte aligned). */
923 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
924 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
926 		device_printf(sc->sc_dev,
927 		    "could not allocate RX ring DMA memory\n");
930 	ring->desc = ring->desc_dma.vaddr;
932 	/* Allocate RX status area (16-byte aligned). */
933 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
934 	    sizeof(*ring->stat), 16);
936 		device_printf(sc->sc_dev,
937 		    "could not allocate RX status DMA memory\n");
940 	ring->stat = ring->stat_dma.vaddr;
942 	/* Create RX buffer DMA tag. */
943 #if defined(__DragonFly__)
944 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
946 	    BUS_SPACE_MAXADDR_32BIT,
949 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
950 	    BUS_DMA_NOWAIT, &ring->data_dmat);
952 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
953 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
954 	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
958 		device_printf(sc->sc_dev,
959 		    "%s: could not create RX buf DMA tag, error %d\n",
965 	 * Allocate and map RX buffers.
967 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
968 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
974 fail:	iwm_free_rx_ring(sc, ring);
/*
 * Quiesce the RX path: stop PCIe RX DMA (if the NIC lock can be
 * taken) and zero the shared-memory RX status so the hardware's ring
 * index matches the driver's reset index.  The memset is the
 * DragonFly-noted bug fix mentioned in the file header.
 * NOTE(review): extract is missing lines (brace/unlock, cursor
 * reset); code kept byte-identical.
 */
979 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
981 	/* XXX conditional nic locks are stupid */
982 	/* XXX print out if we can't lock the NIC? */
983 	if (iwm_nic_lock(sc)) {
984 		/* XXX handle if RX stop doesn't finish? */
985 		(void) iwm_pcie_rx_stop(sc);
991 	 * The hw rx ring index in shared memory must also be cleared,
992 	 * otherwise the discrepancy can cause reprocessing chaos.
994 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Free an RX ring: descriptor and status DMA blocks, then every
 * buffer's mbuf mapping and map, and finally the buffer DMA tag.
 * Idempotent thanks to the NULL checks (also used as the error path
 * of iwm_alloc_rx_ring()).
 * NOTE(review): extract is missing lines (mbuf free/NULL-resets,
 * braces); code kept byte-identical.
 */
998 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1002 	iwm_dma_contig_free(&ring->desc_dma);
1003 	iwm_dma_contig_free(&ring->stat_dma);
1005 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1006 		struct iwm_rx_data *data = &ring->data[i];
1008 		if (data->m != NULL) {
1009 			bus_dmamap_sync(ring->data_dmat, data->map,
1010 			    BUS_DMASYNC_POSTREAD);
1011 			bus_dmamap_unload(ring->data_dmat, data->map);
1015 		if (data->map != NULL) {
1016 			bus_dmamap_destroy(ring->data_dmat, data->map);
1020 	if (ring->data_dmat != NULL) {
1021 		bus_dma_tag_destroy(ring->data_dmat);
1022 		ring->data_dmat = NULL;
/*
 * Allocate one TX ring (queue `qid`): TFD descriptor array (256-byte
 * aligned), a command buffer for the rings that need one (qid <=
 * IWM_MVM_CMD_QUEUE), a per-buffer DMA tag, and a DMA map per entry.
 * Per-entry cmd_paddr/scratch_paddr are precomputed from the command
 * buffer's base physical address.  Any failure jumps to `fail`, which
 * frees the ring.
 * NOTE(review): extract is missing lines (declarations, error checks,
 * returns); code kept byte-identical.
 */
1027 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1037 	/* Allocate TX descriptors (256-byte aligned). */
1038 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1039 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1041 		device_printf(sc->sc_dev,
1042 		    "could not allocate TX ring DMA memory\n");
1045 	ring->desc = ring->desc_dma.vaddr;
1048 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1049 	 * to allocate commands space for other rings.
1051 	if (qid > IWM_MVM_CMD_QUEUE)
1054 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1055 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1057 		device_printf(sc->sc_dev,
1058 		    "could not allocate TX cmd DMA memory\n");
1061 	ring->cmd = ring->cmd_dma.vaddr;
1063 #if defined(__DragonFly__)
1064 	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
1066 	    BUS_SPACE_MAXADDR_32BIT,
1069 	    MCLBYTES, IWM_MAX_SCATTER - 1, MCLBYTES,
1070 	    BUS_DMA_NOWAIT, &ring->data_dmat);
1072 	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1073 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1074 	    IWM_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1078 		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
	/* precompute each slot's command and scratch physical addresses */
1082 	paddr = ring->cmd_dma.paddr;
1083 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1084 		struct iwm_tx_data *data = &ring->data[i];
1086 		data->cmd_paddr = paddr;
1087 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1088 		    + offsetof(struct iwm_tx_cmd, scratch);
1089 		paddr += sizeof(struct iwm_device_cmd);
1091 		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1093 			device_printf(sc->sc_dev,
1094 			    "could not create TX buf DMA map\n");
1098 	KASSERT(paddr == ring->cmd_dma.paddr + size,
1099 	    ("invalid physical address"));
1102 fail:	iwm_free_tx_ring(sc, ring);
/*
 * Reset a TX ring for reuse: unload and free any queued mbufs, zero
 * the descriptor array (synced PREWRITE for the device), and clear
 * this queue's bit in qfullmsk.
 * NOTE(review): extract is missing lines (mbuf free, cursor resets,
 * braces); code kept byte-identical.
 */
1107 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1111 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1112 		struct iwm_tx_data *data = &ring->data[i];
1114 		if (data->m != NULL) {
1115 			bus_dmamap_sync(ring->data_dmat, data->map,
1116 			    BUS_DMASYNC_POSTWRITE);
1117 			bus_dmamap_unload(ring->data_dmat, data->map);
1122 	/* Clear TX descriptors. */
1123 	memset(ring->desc, 0, ring->desc_dma.size);
1124 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1125 	    BUS_DMASYNC_PREWRITE);
1126 	sc->qfullmsk &= ~(1 << ring->qid);
/*
 * Free a TX ring: descriptor and command DMA blocks, then every
 * entry's mbuf mapping and map, and the buffer DMA tag.  NULL checks
 * make it safe as the iwm_alloc_tx_ring() error path.
 * NOTE(review): extract is missing lines (mbuf free/NULL-resets,
 * braces); code kept byte-identical.
 */
1132 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1136 	iwm_dma_contig_free(&ring->desc_dma);
1137 	iwm_dma_contig_free(&ring->cmd_dma);
1139 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1140 		struct iwm_tx_data *data = &ring->data[i];
1142 		if (data->m != NULL) {
1143 			bus_dmamap_sync(ring->data_dmat, data->map,
1144 			    BUS_DMASYNC_POSTWRITE);
1145 			bus_dmamap_unload(ring->data_dmat, data->map);
1149 		if (data->map != NULL) {
1150 			bus_dmamap_destroy(ring->data_dmat, data->map);
1154 	if (ring->data_dmat != NULL) {
1155 		bus_dma_tag_destroy(ring->data_dmat);
1156 		ring->data_dmat = NULL;
1161 * High-level hardware frobbing routines
/* Enable the default interrupt set and cache the mask in sc_intmask
 * so iwm_restore_interrupts() can re-program it later. */
1165 iwm_enable_interrupts(struct iwm_softc *sc)
1167 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1168 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-program the interrupt mask from the cached sc_intmask. */
1172 iwm_restore_interrupts(struct iwm_softc *sc)
1174 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Mask all interrupts, then acknowledge any pending CSR and FH
 * interrupt causes so none fire spuriously on re-enable. */
1178 iwm_disable_interrupts(struct iwm_softc *sc)
1180 	/* disable interrupts */
1181 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1183 	/* acknowledge all interrupts */
1184 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1185 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * (Re)initialize ICT interrupt handling: with interrupts masked,
 * zero the ICT table, program its physical address (shifted by
 * IWM_ICT_PADDR_SHIFT, hence the table's alignment requirement),
 * switch the driver into ICT mode, ack anything pending, and
 * re-enable interrupts.
 * NOTE(review): extract is missing lines (e.g. ICT cursor reset);
 * code kept byte-identical.
 */
1189 iwm_ict_reset(struct iwm_softc *sc)
1191 	iwm_disable_interrupts(sc);
1193 	/* Reset ICT table. */
1194 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1197 	/* Set physical address of ICT table (4KB aligned). */
1198 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1199 	    IWM_CSR_DRAM_INT_TBL_ENABLE
1200 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1201 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1203 	/* Switch to ICT interrupt mode in driver. */
1204 	sc->sc_flags |= IWM_FLAG_USE_ICT;
1206 	/* Re-enable interrupts. */
1207 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1208 	iwm_enable_interrupts(sc);
1211 /* iwlwifi pcie/trans.c */
1214 * Since this .. hard-resets things, it's time to actually
1215 * mark the first vap (if any) as having no mac context.
1216 * It's annoying, but since the driver is potentially being
1217 * stop/start'ed whilst active (thanks openbsd port!) we
1218 * have to correctly track this.
/*
 * Fully stop the device: mask interrupts, invalidate the first vap's
 * uploaded MAC context, halt the TX scheduler and all FH DMA channels
 * (waiting for each to go idle), reset the RX and TX rings, power down
 * the busmaster DMA clocks, reset the on-board processor, and finally
 * re-arm the RF-kill interrupt so switch changes are still noticed
 * while the device is down.
 */
1221 iwm_stop_device(struct iwm_softc *sc)
1223 struct ieee80211com *ic = sc->sc_ic;
1224 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1228 /* tell the device to stop sending interrupts */
1229 iwm_disable_interrupts(sc);
1232 * FreeBSD-local: mark the first vap as not-uploaded,
1233 * so the next transition through auth/assoc
1234 * will correctly populate the MAC context.
1237 struct iwm_vap *iv = IWM_VAP(vap);
1238 iv->is_uploaded = 0;
1241 /* device going down, Stop using ICT table */
1242 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1244 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1246 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1248 /* Stop all DMA channels. */
1249 if (iwm_nic_lock(sc)) {
1250 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1252 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
/* Poll (up to 200 tries) for the channel-idle status bit. */
1253 for (ntries = 0; ntries < 200; ntries++) {
1256 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1257 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1267 iwm_reset_rx_ring(sc, &sc->rxq);
1269 /* Reset all TX rings. */
1270 for (qid = 0; qid < nitems(sc->txq); qid++)
1271 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1274 * Power-down device's busmaster DMA clocks
1276 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1279 /* Make sure (redundant) we've released our request to stay awake */
1280 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1281 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1283 /* Stop the device, and put it in low power state */
1286 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1287 * Clean again the interrupt here
1289 iwm_disable_interrupts(sc);
1290 /* stop and reset the on-board processor */
1291 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
1294 * Even if we stop the HW, we still want the RF kill
1297 iwm_enable_rfkill_int(sc);
1298 iwm_check_rfkill(sc);
1301 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW interface configuration register from the firmware's
 * PHY configuration (radio type/step/dash) plus the MAC step/dash taken
 * from the hardware revision, then apply the early-PCIe-power-off
 * workaround so ME firmware does not lose NIC ownership.
 */
1303 iwm_mvm_nic_config(struct iwm_softc *sc)
1305 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1306 uint32_t reg_val = 0;
/* Decode radio type/step/dash fields out of sc_fw_phy_config. */
1308 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1309 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1310 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1311 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1312 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1313 IWM_FW_PHY_CFG_RADIO_DASH_POS;
/* MAC step/dash come from the hardware revision register. */
1316 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1317 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1318 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1319 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1321 /* radio configuration */
1322 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1323 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1324 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1326 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1328 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1329 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1330 radio_cfg_step, radio_cfg_dash);
1333 * W/A : NIC is stuck in a reset state after Early PCIe power off
1334 * (PCIe power is lost before PERST# is asserted), causing ME FW
1335 * to lose ownership and not being able to obtain it back.
1337 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1338 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1339 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Initialize the RX DMA engine: clear the status area, stop/reset the
 * RX FH channel, program ring and status DMA addresses, configure the
 * RX channel (4K buffers, interrupt to host), set interrupt coalescing,
 * and seed the write pointer.  Requires the NIC lock.
 */
1343 iwm_nic_rx_init(struct iwm_softc *sc)
1345 if (!iwm_nic_lock(sc))
1349 * Initialize RX ring. This is from the iwn driver.
1351 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/* Stop the RX channel and reset its pointers before reprogramming. */
1354 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1355 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1356 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1357 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1358 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1360 /* Set physical address of RX ring (256-byte aligned). */
1362 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1364 /* Set physical address of RX status (16-byte aligned). */
1366 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1368 #if defined(__DragonFly__)
1369 /* Force serialization (probably not needed but don't trust the HW) */
1370 IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
1375 * Note: Linux driver also sets this:
1376 * (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1378 * It causes weird behavior. YMMV.
1380 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1381 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1382 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1383 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1384 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1385 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1387 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1389 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1390 if (sc->host_interrupt_operation_mode)
1391 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1394 * Thus sayeth el jefe (iwlwifi) via a comment:
1396 * This value should initially be 0 (before preparing any
1397 * RBs), should be 8 after preparing the first 8 RBs (for example)
1399 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Initialize the TX side: deactivate the scheduler, program the
 * "keep warm" page address, and point the hardware at each TX ring's
 * descriptor array.  Requires the NIC lock.
 */
1407 iwm_nic_tx_init(struct iwm_softc *sc)
1411 if (!iwm_nic_lock(sc))
1414 /* Deactivate TX scheduler. */
1415 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1417 /* Set physical address of "keep warm" page (16-byte aligned). */
1418 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1420 /* Initialize TX rings. */
1421 for (qid = 0; qid < nitems(sc->txq); qid++) {
1422 struct iwm_tx_ring *txq = &sc->txq[qid];
1424 /* Set physical address of TX ring (256-byte aligned). */
1425 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1426 txq->desc_dma.paddr >> 8);
1427 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1428 "%s: loading ring %d descriptors (%p) at %lx\n",
1431 (unsigned long) (txq->desc_dma.paddr >> 8));
/*
 * One-stop NIC bring-up: apply the HW/radio configuration, then run RX
 * and TX DMA initialization, and enable shadow registers.
 */
1439 iwm_nic_init(struct iwm_softc *sc)
1446 iwm_mvm_nic_config(sc);
1448 if ((error = iwm_nic_rx_init(sc)) != 0)
1452 * Ditto for TX, from iwn
1454 if ((error = iwm_nic_tx_init(sc)) != 0)
1457 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1458 "%s: shadow registers enabled\n", __func__);
1459 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/*
 * Firmware TX FIFO numbers; iwm_mvm_ac_to_tx_fifo maps net80211 WME
 * access categories onto these FIFOs.
 */
1464 enum iwm_mvm_tx_fifo {
1465 IWM_MVM_TX_FIFO_BK = 0,
1469 IWM_MVM_TX_FIFO_MCAST = 5,
1472 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate a hardware TX queue and bind it to the given scheduler FIFO:
 * deactivate it, configure chaining/aggregation (non-command queues
 * chain, aggregation off), zero read/write pointers and the SRAM queue
 * context, set the scheduler window size and frame limit, then mark the
 * queue active.  Requires the NIC lock; silently returns if it cannot
 * be taken (XXX should propagate EBUSY).
 */
1480 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
1482 if (!iwm_nic_lock(sc)) {
1483 device_printf(sc->sc_dev,
1484 "%s: cannot enable txq %d\n",
1487 return; /* XXX return EBUSY */
1490 /* unactivate before configuration */
1491 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1492 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1493 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1495 if (qid != IWM_MVM_CMD_QUEUE) {
1496 iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
/* No aggregation on this queue. */
1499 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
/* Reset hardware write and read pointers to slot 0. */
1501 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1502 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1504 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1505 /* Set scheduler window size and frame limit. */
1507 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1509 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1510 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1511 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1512 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
/* Finally mark the queue active on the requested FIFO. */
1514 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1515 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1516 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1517 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1518 IWM_SCD_QUEUE_STTS_REG_MSK);
1522 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1523 "%s: enabled txq %d FIFO %d\n",
1524 __func__, qid, fifo);
/*
 * Post-"alive" firmware setup: verify the scheduler SRAM base address,
 * clear the scheduler context memory, program the DRAM scheduler ring
 * base, enable the command queue (FIFO 7) and all FH DMA channels, set
 * the auto-retry chicken bit, and re-enable L1-Active.
 */
1528 iwm_post_alive(struct iwm_softc *sc)
1533 if (!iwm_nic_lock(sc))
/* Sanity check: the firmware's reported sched base must match ours. */
1536 if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
1537 device_printf(sc->sc_dev,
1538 "%s: sched addr mismatch",
1546 /* Clear TX scheduler state in SRAM. */
1547 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1548 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1550 error = iwm_write_mem(sc,
1551 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1556 /* Set physical address of TX scheduler rings (1KB aligned). */
1557 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1559 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1561 /* enable command channel */
1562 iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
/* Activate all TX FIFOs in the scheduler. */
1564 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1566 /* Enable DMA channels. */
1567 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1568 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1569 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1570 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1573 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1574 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1576 /* Enable L1-Active */
1577 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1578 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1586 * NVM read access and content parsing. We do not support
1587 * external NVM or writing NVM.
1591 /* list of NVM sections we are allowed/need to read */
1592 const int nvm_to_read[] = {
1593 IWM_NVM_SECTION_TYPE_HW,
1594 IWM_NVM_SECTION_TYPE_SW,
1595 IWM_NVM_SECTION_TYPE_CALIBRATION,
1596 IWM_NVM_SECTION_TYPE_PRODUCTION,
1599 /* Default NVM size to read */
1600 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1601 #define IWM_MAX_NVM_SECTION_SIZE 7000
/* Opcodes for iwm_nvm_access_cmd.op_code; only READ is used here. */
1603 #define IWM_NVM_WRITE_OPCODE 1
1604 #define IWM_NVM_READ_OPCODE 0
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the chunk is copied into data at 'offset' and
 * *len receives the number of bytes actually read.  The response is
 * validated against command failure, non-zero NVM status, and an
 * offset echo that does not match the request.
 */
1607 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1608 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1611 struct iwm_nvm_access_cmd nvm_access_cmd = {
1612 .offset = htole16(offset),
1613 .length = htole16(length),
1614 .type = htole16(section),
1615 .op_code = IWM_NVM_READ_OPCODE,
1617 struct iwm_nvm_access_resp *nvm_resp;
1618 struct iwm_rx_packet *pkt;
1619 struct iwm_host_cmd cmd = {
1620 .id = IWM_NVM_ACCESS_CMD,
1621 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1622 IWM_CMD_SEND_IN_RFKILL,
1623 .data = { &nvm_access_cmd, },
1625 int ret, bytes_read, offset_read;
1628 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1630 ret = iwm_send_cmd(sc, &cmd);
1635 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1636 device_printf(sc->sc_dev,
1637 "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1638 __func__, pkt->hdr.flags);
1643 /* Extract NVM response */
1644 nvm_resp = (void *)pkt->data;
1646 ret = le16toh(nvm_resp->status);
1647 bytes_read = le16toh(nvm_resp->length);
1648 offset_read = le16toh(nvm_resp->offset);
1649 resp_data = nvm_resp->data;
1651 device_printf(sc->sc_dev,
1652 "%s: NVM access command failed with status %d\n",
1658 if (offset_read != offset) {
1659 device_printf(sc->sc_dev,
1660 "%s: NVM ACCESS response with invalid offset %d\n",
1661 __func__, offset_read);
/* Copy the returned bytes into the caller's section buffer. */
1666 memcpy(data + offset, resp_data, bytes_read);
/* Always release the response buffer obtained via IWM_CMD_WANT_SKB. */
1670 iwm_free_resp(sc, &cmd);
1675 * Reads an NVM section completely.
1676 * NICs prior to 7000 family doesn't have a real NVM, but just read
1677 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1678 * by uCode, we need to manually check in this case that we don't
1679 * overflow and try to read more than the EEPROM size.
1680 * For 7000 family NICs, we supply the maximal size we can read, and
1681 * the uCode fills the response with as much data as we can,
1682 * without overflowing, so no check is needed.
/*
 * Read an entire NVM section in IWM_NVM_DEFAULT_CHUNK_SIZE chunks,
 * accumulating the running offset in *len.  The loop terminates when a
 * chunk returns fewer bytes than requested (section exhausted).
 */
1685 iwm_nvm_read_section(struct iwm_softc *sc,
1686 uint16_t section, uint8_t *data, uint16_t *len)
1688 uint16_t length, seglen;
1691 /* Set nvm section read length */
1692 length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1695 /* Read the NVM until exhausted (reading less than requested) */
1696 while (seglen == length) {
1697 error = iwm_nvm_read_chunk(sc,
1698 section, *len, length, data, &seglen);
1700 device_printf(sc->sc_dev,
1701 "Cannot read NVM from section "
1702 "%d offset %d, length %d\n",
1703 section, *len, length);
1709 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1710 "NVM section %d read completed\n", section);
1715 * BEGIN IWM_NVM_PARSE
1718 /* iwlwifi/iwl-nvm-parse.c */
/*
 * NVM layout constants (from iwlwifi/iwl-nvm-parse.c): word offsets
 * within each NVM section, SKU capability bits, radio-config field
 * extractors, and per-channel flag bits used by the channel-map parser.
 */
1720 /* NVM offsets (in words) definitions */
1721 enum wkp_nvm_offsets {
1722 /* NVM HW-Section offset (in words) definitions */
1725 /* NVM SW-Section offset (in words) definitions */
1726 IWM_NVM_SW_SECTION = 0x1C0,
1727 IWM_NVM_VERSION = 0,
1731 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1733 /* NVM calibration section offset (in words) definitions */
1734 IWM_NVM_CALIB_SECTION = 0x2B8,
1735 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1738 /* SKU Capabilities (actual values from NVM definition) */
1740 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1741 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1742 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1743 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1746 /* radio config bits (actual values from NVM definition) */
1747 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1748 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1749 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1750 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1751 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1752 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1754 #define DEFAULT_MAX_TX_POWER 16
1757 * enum iwm_nvm_channel_flags - channel flags in NVM
1758 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1759 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1760 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1761 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1762 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1763 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1764 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1765 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1766 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1768 enum iwm_nvm_channel_flags {
1769 IWM_NVM_CHANNEL_VALID = (1 << 0),
1770 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1771 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1772 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1773 IWM_NVM_CHANNEL_DFS = (1 << 7),
1774 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1775 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1776 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1777 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1781 * Add a channel to the net80211 channel list.
1783 * ieee is the ieee channel number
1784 * ch_idx is channel index.
1785 * mode is the channel mode - CHAN_A, CHAN_B, CHAN_G.
1786 * ch_flags is the iwm channel flags.
1788 * Return 0 on OK, < 0 on error.
/*
 * Append one channel to ic_channels: set the IEEE number, pick the
 * 2GHz/5GHz flags from the channel index, derive the frequency via
 * ieee80211_ieee2mhz(), and mark passive if the NVM does not allow
 * active scanning.  No overflow check on ic_channels (XXX noted below).
 */
1791 iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
1792 int mode, uint16_t ch_flags)
1794 /* XXX for now, no overflow checking! */
1795 struct ieee80211com *ic = sc->sc_ic;
1797 struct ieee80211_channel *channel;
1799 channel = &ic->ic_channels[ic->ic_nchans++];
1800 channel->ic_ieee = ieee;
/* Indices below IWM_NUM_2GHZ_CHANNELS belong to the 2GHz band. */
1802 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
1804 flags = IEEE80211_CHAN_2GHZ;
1805 channel->ic_flags = mode;
1807 flags = IEEE80211_CHAN_5GHZ;
1808 channel->ic_flags = mode;
1810 channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
1812 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
1813 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
/*
 * Walk the NVM channel-flag words and populate the net80211 channel
 * list: skip invalid channels (and 5GHz channels when the SKU doesn't
 * enable that band), add one entry per band/mode, and finally sort the
 * resulting channel table.
 */
1818 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
1820 struct ieee80211com *ic = sc->sc_ic;
1821 struct iwm_nvm_data *data = &sc->sc_nvm;
1826 for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
1827 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
/* If the SKU doesn't allow 5GHz, force those channels invalid. */
1829 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
1830 !data->sku_cap_band_52GHz_enable)
1831 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
1833 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1834 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1835 "Ch. %d Flags %x [%sGHz] - No traffic\n",
1836 iwm_nvm_channels[ch_idx],
1838 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1843 hw_value = iwm_nvm_channels[ch_idx];
/* 5GHz channels get one entry; 2GHz get 11b plus (usually) 11g. */
1846 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
1847 (void) iwm_init_net80211_channel(sc, hw_value,
1852 (void) iwm_init_net80211_channel(sc, hw_value,
1856 /* If it's not channel 13, also add 11g */
1858 (void) iwm_init_net80211_channel(sc, hw_value,
1864 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1865 "Ch. %d Flags %x [%sGHz] - Added\n",
1866 iwm_nvm_channels[ch_idx],
1868 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1871 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
/*
 * Decode the raw NVM section words into sc->sc_nvm: version, radio
 * configuration, SKU capabilities, antenna masks, crystal calibration,
 * and the MAC address (stored little-endian word-swapped in NVM), then
 * build the channel map.  Returns non-zero if the antenna masks are
 * invalid.
 */
1875 iwm_parse_nvm_data(struct iwm_softc *sc,
1876 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
1877 const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
1879 struct iwm_nvm_data *data = &sc->sc_nvm;
1880 uint8_t hw_addr[IEEE80211_ADDR_LEN];
1881 uint16_t radio_cfg, sku;
1883 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
/* Split the radio-config word into its bit fields. */
1885 radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
1886 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
1887 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
1888 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
1889 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
1890 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
1891 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
1893 sku = le16_to_cpup(nvm_sw + IWM_SKU);
1894 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
1895 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
/* 11n is deliberately disabled here regardless of the SKU bit. */
1896 data->sku_cap_11n_enable = 0;
1898 if (!data->valid_tx_ant || !data->valid_rx_ant) {
1899 device_printf(sc->sc_dev,
1900 "%s: invalid antennas (0x%x, 0x%x)\n",
1901 __func__, data->valid_tx_ant,
1902 data->valid_rx_ant);
1906 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
1908 data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
1909 data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
1911 /* The byte order is little endian 16 bit, meaning 214365 */
1912 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
1913 data->hw_addr[0] = hw_addr[1];
1914 data->hw_addr[1] = hw_addr[0];
1915 data->hw_addr[2] = hw_addr[3];
1916 data->hw_addr[3] = hw_addr[2];
1917 data->hw_addr[4] = hw_addr[5];
1918 data->hw_addr[5] = hw_addr[4];
1920 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
1921 data->calib_version = 255; /* TODO:
1922 this value will prevent some checks from
1923 failing, we need to check if this
1924 field is still needed, and if it does,
1925 where is it in the NVM */
/* One in-memory NVM section: owned data buffer plus its length. */
1934 struct iwm_nvm_section {
1936 const uint8_t *data;
/*
 * Validate that the mandatory HW and SW sections were read, then hand
 * the section buffers (reinterpreted as 16-bit word arrays) to
 * iwm_parse_nvm_data() together with the firmware's valid antenna
 * masks.
 */
1940 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1942 const uint16_t *hw, *sw, *calib;
1944 /* Checking for required sections */
1945 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1946 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1947 device_printf(sc->sc_dev,
1948 "%s: Can't parse empty NVM sections\n",
1953 hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1954 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1955 calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1956 return iwm_parse_nvm_data(sc, hw, sw, calib,
1957 IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
/*
 * Read every section in nvm_to_read[] from the firmware into a shared
 * scratch buffer, duplicate each into its own allocation in
 * nvm_sections[], then parse the collected sections.  The scratch
 * buffer is freed before parsing.
 */
1961 iwm_nvm_init(struct iwm_softc *sc)
1963 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1964 int i, section, error;
1966 uint8_t *nvm_buffer, *temp;
1968 /* Read From FW NVM */
1969 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1973 /* TODO: find correct NVM max size for a section */
1974 nvm_buffer = kmalloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_INTWAIT);
1975 if (nvm_buffer == NULL)
1977 for (i = 0; i < nitems(nvm_to_read); i++) {
1978 section = nvm_to_read[i];
1979 KASSERT(section <= nitems(nvm_sections),
1980 ("too many sections"));
1982 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
/* Keep a private copy; nvm_buffer is reused for the next section. */
1986 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
1991 memcpy(temp, nvm_buffer, len);
1992 nvm_sections[section].data = temp;
1993 nvm_sections[section].length = len;
1995 kfree(nvm_buffer, M_DEVBUF);
1999 return iwm_parse_nvm_sections(sc, nvm_sections);
2003 * Firmware loading gunk. This is kind of a weird hybrid between the
2004 * iwn driver and the Linux iwlwifi driver.
/*
 * DMA one firmware section chunk into device SRAM at dst_addr using the
 * FH service channel: copy the bytes into the pre-allocated fw_dma
 * bounce buffer, program the service-channel TFD registers, kick the
 * transfer, and sleep (up to 1s) until the completion interrupt sets
 * sc_fw_chunk_done.
 */
2008 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2009 const uint8_t *section, uint32_t byte_cnt)
2011 struct iwm_dma_info *dma = &sc->fw_dma;
2014 /* Copy firmware section into pre-allocated DMA-safe memory. */
2015 memcpy(dma->vaddr, section, byte_cnt);
2016 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2018 if (!iwm_nic_lock(sc))
2021 sc->sc_fw_chunk_done = 0;
/* Pause the service channel while the TFD is being programmed. */
2023 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2024 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2025 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2027 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2028 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK)
2029 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2030 (iwm_get_dma_hi_addr(dma->paddr)
2031 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2032 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2033 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2034 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2035 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
/* Re-enable the channel to start the transfer. */
2036 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2037 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2038 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2039 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2043 /* wait 1s for this segment to load */
2045 while (!sc->sc_fw_chunk_done) {
2046 #if defined(__DragonFly__)
2047 error = iwmsleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2049 error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
/*
 * Load every section of the given ucode image into the device via
 * iwm_firmware_load_chunk(), release the CPU from reset, then wait
 * (up to ~1s, in hz/10 slices) for the firmware's alive interrupt to
 * set sc_uc.uc_intr.
 */
2059 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2061 struct iwm_fw_sects *fws;
2067 sc->sc_uc.uc_intr = 0;
2069 fws = &sc->sc_fw.fw_sects[ucode_type];
2070 for (i = 0; i < fws->fw_count; i++) {
2071 data = fws->fw_sect[i].fws_data;
2072 dlen = fws->fw_sect[i].fws_len;
2073 offset = fws->fw_sect[i].fws_devoff;
2074 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2075 "LOAD FIRMWARE type %d offset %u len %d\n",
2076 ucode_type, offset, dlen);
2077 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2079 device_printf(sc->sc_dev,
2080 "%s: chunk %u of %u returned error %02d\n",
2081 __func__, i, fws->fw_count, error);
2086 /* wait for the firmware to load */
2087 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2089 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2090 #if defined(__DragonFly__)
2091 error = iwmsleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2093 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2100 /* iwlwifi: pcie/trans.c */
2102 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2106 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2108 if ((error = iwm_nic_init(sc)) != 0) {
2109 device_printf(sc->sc_dev, "unable to init nic\n");
2113 /* make sure rfkill handshake bits are cleared */
2114 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2115 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2116 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2118 /* clear (again), then enable host interrupts */
2119 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2120 iwm_enable_interrupts(sc);
2122 /* really make sure rfkill handshake bits are cleared */
2123 /* maybe we should write a few times more? just to make sure */
2124 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2125 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2127 /* Load the given image to the HW */
2128 return iwm_load_firmware(sc, ucode_type);
/* Post-alive hook; currently just delegates to iwm_post_alive(). */
2132 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
2134 return iwm_post_alive(sc);
/* Tell the firmware which TX antennas are valid (synchronous command). */
2138 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2140 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2141 .valid = htole32(valid_tx_ant),
2144 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2145 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2148 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration command, selecting calibration triggers
 * appropriate to the currently running ucode image (sc_uc_current).
 */
2150 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2152 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2153 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2155 /* Set parameters */
2156 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2157 phy_cfg_cmd.calib_control.event_trigger =
2158 sc->sc_default_calib[ucode_type].event_trigger;
2159 phy_cfg_cmd.calib_control.flow_trigger =
2160 sc->sc_default_calib[ucode_type].flow_trigger;
2162 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2163 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2164 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2165 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * Load the requested ucode image and wait for its alive notification:
 * read the firmware from disk if needed, record the new image as
 * current (rolling back to the previous image if the start fails), then
 * run post-alive setup.  Each failure path logs the failing step.
 */
2169 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2170 enum iwm_ucode_type ucode_type)
2172 enum iwm_ucode_type old_type = sc->sc_uc_current;
2175 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2176 kprintf("iwm_read_firmware: failed %d\n",
2181 sc->sc_uc_current = ucode_type;
2182 error = iwm_start_fw(sc, ucode_type);
/* On failure, restore the previously current image type. */
2184 kprintf("iwm_start_fw: failed %d\n", error);
2185 sc->sc_uc_current = old_type;
2189 error = iwm_fw_alive(sc, sc->sched_base);
2191 kprintf("iwm_fw_alive: failed %d\n", error);
2201 * follows iwlwifi/fw.c
/*
 * Run the INIT ucode image: refuse if RF-kill is engaged (unless only
 * reading the NVM), load the init image, read and parse the NVM, size
 * and allocate the scan command buffer, then (when doing a full init)
 * send the TX antenna and PHY configuration commands and wait for the
 * init-complete notification (2s timeout per sleep).
 */
2204 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2208 /* do not operate with rfkill switch turned on */
2209 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2210 device_printf(sc->sc_dev,
2211 "radio is disabled by hardware switch\n");
2215 sc->sc_init_complete = 0;
2216 if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2217 IWM_UCODE_TYPE_INIT)) != 0) {
2218 kprintf("iwm_mvm_load_ucode_wait_alive: failed %d\n",
2224 if ((error = iwm_nvm_init(sc)) != 0) {
2225 device_printf(sc->sc_dev, "failed to read nvm\n");
2228 IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->sc_nvm.hw_addr);
/* Size the scan command for the maximum probe and channel count. */
2230 sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2231 + sc->sc_capa_max_probe_len
2232 + IWM_MAX_NUM_SCAN_CHANNELS
2233 * sizeof(struct iwm_scan_channel);
2234 sc->sc_scan_cmd = kmalloc(sc->sc_scan_cmd_len, M_DEVBUF,
2236 if (sc->sc_scan_cmd == NULL)
2242 /* Send TX valid antennas before triggering calibrations */
2243 if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
2244 kprintf("iwm_send_tx_ant_cfg: failed %d\n", error);
2249 * Send phy configurations command to init uCode
2250 * to start the 16.0 uCode init image internal calibrations.
2252 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2253 device_printf(sc->sc_dev,
2254 "%s: failed to run internal calibration: %d\n",
2260 * Nothing to do but wait for the init complete notification
2263 while (!sc->sc_init_complete) {
2264 #if defined(__DragonFly__)
2265 error = iwmsleep(&sc->sc_init_complete, &sc->sc_lk,
2266 0, "iwminit", 2*hz);
2268 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2269 0, "iwminit", 2*hz);
2272 kprintf("init complete failed %d\n",
2273 sc->sc_init_complete);
2285 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * (Re)stock one RX ring slot: allocate a jumbo cluster mbuf, unload
 * any previous mapping, create and load a DMA map for the new buffer,
 * and write the (256-byte aligned) physical address, shifted right 8,
 * into the hardware RX descriptor.  Called at init time and from the
 * RX path to replace consumed buffers.
 * NOTE(review): EFBIG from bus_dmamap_load is tolerated here — confirm
 * that is intentional for this DMA tag configuration.
 */
2287 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2289 struct iwm_rx_ring *ring = &sc->rxq;
2290 struct iwm_rx_data *data = &ring->data[idx];
2295 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2299 if (data->m != NULL)
2300 bus_dmamap_unload(ring->data_dmat, data->map);
2302 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2303 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2305 device_printf(sc->sc_dev,
2306 "%s: could not create RX buf DMA map, error %d\n",
2311 error = bus_dmamap_load(ring->data_dmat, data->map,
2312 mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2313 &paddr, BUS_DMA_NOWAIT);
2314 if (error != 0 && error != EFBIG) {
2315 device_printf(sc->sc_dev,
2316 "%s: can't map mbuf, error %d\n", __func__,
2320 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2322 /* Update RX descriptor. */
2323 KKASSERT((paddr & 255) == 0);
2324 ring->desc[idx] = htole32(paddr >> 8);
2325 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2326 BUS_DMASYNC_PREWRITE);
2333 /* iwlwifi: mvm/rx.c */
2334 #define IWM_RSSI_OFFSET 50
/*
 * Legacy RSSI calculation (pre energy-API firmware): extract per-chain
 * AGC and in-band RSSI values from the PHY info words and convert to
 * dBm (rssi - offset - agc); returns the stronger of the two chains.
 */
2336 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2338 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2339 uint32_t agc_a, agc_b;
2342 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2343 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2344 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2346 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2347 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2348 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2351 * dBm = rssi dB - agc dB - constant.
2352 * Higher AGC (higher radio gain) means lower signal.
2354 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2355 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2356 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2358 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2359 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2360 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2362 return max_rssi_dbm;
2365 /* iwlwifi: mvm/rx.c */
2367 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2368 * values are reported by the fw as positive values - need to negate
2369 * to obtain their dBM. Account for missing antennas by replacing 0
2370 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
/*
 * Energy-API signal strength: firmware reports per-antenna energy as
 * positive values; negate them to get dBm and substitute -256 dBm for
 * missing antennas (reported as 0).  Returns the maximum over A/B/C.
 */
2373 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2375 int energy_a, energy_b, energy_c, max_energy;
2378 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2379 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2380 IWM_RX_INFO_ENERGY_ANT_A_POS;
2381 energy_a = energy_a ? -energy_a : -256;
2382 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2383 IWM_RX_INFO_ENERGY_ANT_B_POS;
2384 energy_b = energy_b ? -energy_b : -256;
2385 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2386 IWM_RX_INFO_ENERGY_ANT_C_POS;
2387 energy_c = energy_c ? -energy_c : -256;
2388 max_energy = MAX(energy_a, energy_b);
2389 max_energy = MAX(max_energy, energy_c);
2391 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2392 "energy In A %d B %d C %d , and max %d\n",
2393 energy_a, energy_b, energy_c, max_energy);
/*
 * Handle an RX PHY-info notification: cache the PHY stats in
 * sc_last_phy_info for use by the subsequent MPDU handler.
 */
2399 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2400 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2402 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2404 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2405 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2407 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2411 * Retrieve the average noise (in dBm) among receivers.
2414 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2416 int i, total, nbant, noise;
2418 total = nbant = noise = 0;
2419 for (i = 0; i < 3; i++) {
2420 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2427 /* There should be at least one antenna but check anyway. */
2428 return (nbant == 0) ? -127 : (total / nbant) - 107;
2432 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2434 * Handles the actual data of the Rx packet from the fw
/*
 * Validates the frame (dsp size, CRC/overrun status), computes RSSI using
 * whichever API the firmware supports, replenishes the RX ring, fills an
 * ieee80211_rx_stats and radiotap header, and hands the mbuf to net80211.
 */
2437 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2438 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2440 	struct ieee80211com *ic = sc->sc_ic;
2441 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2442 	struct ieee80211_frame *wh;
2443 	struct ieee80211_node *ni;
2444 	struct ieee80211_rx_stats rxs;
2446 	struct iwm_rx_phy_info *phy_info;
2447 	struct iwm_rx_mpdu_res_start *rx_res;
2449 	uint32_t rx_pkt_status;
2452 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* PHY info was cached by the preceding IWM_RX_PHY_CMD notification. */
2454 	phy_info = &sc->sc_last_phy_info;
2455 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2456 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2457 	len = le16toh(rx_res->byte_count);
/* The 32-bit RX status word sits immediately after the frame payload. */
2458 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
/* Point the mbuf at the 802.11 frame inside the RX buffer. */
2461 	m->m_data = pkt->data + sizeof(*rx_res);
2462 	m->m_pkthdr.len = m->m_len = len;
2464 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2465 		device_printf(sc->sc_dev,
2466 		    "dsp size out of range [0,20]: %d\n",
2467 		    phy_info->cfg_phy_cnt);
/* Drop frames the firmware flagged as corrupted or truncated. */
2471 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2472 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2473 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2474 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
/* Newer firmware reports energy directly; older needs RSSI calculation. */
2478 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2479 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2481 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
2483 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
2484 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
2486 	/* replenish ring for the buffer we're going to feed to the sharks */
2487 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2488 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2493 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2495 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2496 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
2498 	    le16toh(phy_info->channel),
2499 	    le16toh(phy_info->phy_flags));
2502 	 * Populate an RX state struct with the provided information.
2504 	bzero(&rxs, sizeof(rxs));
2505 #if !defined(__DragonFly__)
2506 	/* requires new fbsd stack */
2507 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2509 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
/* DragonFly's older stack lacks the c_ieee/c_freq rxs fields. */
2510 #if defined(__DragonFly__)
2513 	c_ieee = le16toh(phy_info->channel);
2514 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2515 		c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_2GHZ);
2517 		c_freq = ieee80211_ieee2mhz(c_ieee, IEEE80211_CHAN_5GHZ);
2520 	rxs.c_ieee = le16toh(phy_info->channel);
2521 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2522 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2524 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2527 	rxs.rssi = rssi - sc->sc_noise;
2528 	rxs.nf = sc->sc_noise;
/* Fill in radiotap fields only when a sniffer is actually listening. */
2530 	if (ieee80211_radiotap_active_vap(vap)) {
2531 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2534 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2535 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2536 #if defined(__DragonFly__)
2537 		tap->wr_chan_freq = htole16(c_freq);
2539 		tap->wr_chan_freq = htole16(rxs.c_freq);
2541 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2542 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2543 		tap->wr_dbm_antsignal = (int8_t)rssi;
2544 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2545 		tap->wr_tsft = phy_info->system_timestamp;
/* Map firmware PLCP rate codes to radiotap rates (units of 500 kb/s). */
2546 		switch (phy_info->rate) {
/* CCK rates. */
2548 		case 10: tap->wr_rate = 2; break;
2549 		case 20: tap->wr_rate = 4; break;
2550 		case 55: tap->wr_rate = 11; break;
2551 		case 110: tap->wr_rate = 22; break;
/* OFDM rates. */
2553 		case 0xd: tap->wr_rate = 12; break;
2554 		case 0xf: tap->wr_rate = 18; break;
2555 		case 0x5: tap->wr_rate = 24; break;
2556 		case 0x7: tap->wr_rate = 36; break;
2557 		case 0x9: tap->wr_rate = 48; break;
2558 		case 0xb: tap->wr_rate = 72; break;
2559 		case 0x1: tap->wr_rate = 96; break;
2560 		case 0x3: tap->wr_rate = 108; break;
2561 		/* Unknown rate: should not happen. */
2562 		default:  tap->wr_rate = 0;
/* Known sender: full input path; otherwise broadcast to all VAPs. */
2568 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2569 		ieee80211_input_mimo(ni, m, &rxs);
2570 		ieee80211_free_node(ni);
2572 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2573 		ieee80211_input_mimo_all(ic, m, &rxs);
/*
 * Process the TX response for a single frame: bump the interface
 * error/packet counter and feed the outcome (with the retry count)
 * into net80211's rate control.
 */
2579 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2580 	struct iwm_node *in)
2582 	struct ifnet *ifp = sc->sc_ifp;
2583 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2584 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
/* Number of retransmissions the firmware performed for this frame. */
2585 	int failack = tx_resp->failure_frame;
/* This handler only supports single-frame (non-aggregated) responses. */
2587 	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2589 	/* Update rate control statistics. */
2590 	if (status != IWM_TX_STATUS_SUCCESS &&
2591 	    status != IWM_TX_STATUS_DIRECT_DONE) {
2592 #if defined(__DragonFly__)
2595 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2597 		ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2598 		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2601 #if defined(__DragonFly__)
2604 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2606 		ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
2607 		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
/*
 * IWM_TX_CMD response handler: locate the TX ring slot from the command
 * header, process the completion, release the DMA mapping and node
 * reference, and restart transmission if the ring drained below the
 * low-water mark.
 */
2612 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
2613 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2615 	struct ifnet *ifp = sc->sc_ifp;
2616 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
2617 	int idx = cmd_hdr->idx;
2618 	int qid = cmd_hdr->qid;
2619 	struct iwm_tx_ring *ring = &sc->txq[qid];
2620 	struct iwm_tx_data *txd = &ring->data[idx];
2621 	struct iwm_node *in = txd->in;
2624 		device_printf(sc->sc_dev,
2625 		    "%s: got tx interrupt that's already been handled!\n",
2629 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* A completion means the firmware is alive; reset the watchdog. */
2631 	sc->sc_tx_timer = 0;
2633 	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
2635 	/* Unmap and free mbuf. */
2636 	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
2637 	bus_dmamap_unload(ring->data_dmat, txd->map);
2640 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2641 	    "free txd %p, in %p\n", txd, txd->in);
2642 	KASSERT(txd->done == 0, ("txd not done"));
2644 	KASSERT(txd->in, ("txd without node"));
/* Drop the node reference taken when the frame was queued. */
2648 	ieee80211_free_node((struct ieee80211_node *)in);
/* Ring has drained: clear the full flag and kick the TX queue again. */
2650 	if (--ring->queued < IWM_TX_RING_LOMARK) {
2651 		sc->qfullmsk &= ~(1 << ring->qid);
2652 #if defined(__DragonFly__)
2653 		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
2654 			ifq_clr_oactive(&ifp->if_snd);
2656 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_DRV_OACTIVE)) {
2657 			ifp->if_flags &= ~IFF_DRV_OACTIVE;
2660 			 * Well, we're in interrupt context, but then again
2661 			 * I guess net80211 does all sorts of stunts in
2662 			 * interrupt context, so maybe this is no biggie.
2664 			iwm_start_locked(ifp);
2674 * Process a "command done" firmware notification. This is where we wakeup
2675 * processes waiting for a synchronous command completion.
2679 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2681 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2682 	struct iwm_tx_data *data;
/* Only responses on the dedicated command queue are command acks. */
2684 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2685 		return;	/* Not a command ack. */
2688 	data = &ring->data[pkt->hdr.idx];
2690 	/* If the command was mapped in an mbuf, free it. */
2691 	if (data->m != NULL) {
2692 		bus_dmamap_sync(ring->data_dmat, data->map,
2693 		    BUS_DMASYNC_POSTWRITE);
2694 		bus_dmamap_unload(ring->data_dmat, data->map);
/* Wake any thread sleeping in iwm_send_cmd() on this descriptor. */
2698 	wakeup(&ring->desc[pkt->hdr.idx]);
2703 * necessary only for block ack mode
/*
 * Update the TX scheduler byte-count table so the firmware knows the
 * frame length queued at (qid, idx) for station sta_id.
 */
2706 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
2709 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
2712 	scd_bc_tbl = sc->sched_dma.vaddr;
2714 	len += 8; /* magic numbers came naturally from paris */
/* With the DW byte-count table the length is stored in dwords. */
2715 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
2716 		len = roundup(len, 4) / 4;
/* Entry format: station id in the top 4 bits, length below. */
2718 	w_val = htole16(sta_id << 12 | len);
2720 	/* Update TX scheduler. */
2721 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
2722 	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2723 	    BUS_DMASYNC_PREWRITE);
2725 	/* I really wonder what this is ?!? */
/* Low indices are duplicated past the ring end (hardware wrap-around). */
2726 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
2727 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
2728 		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2729 		    BUS_DMASYNC_PREWRITE);
2735 * Take an 802.11 (non-n) rate, find the relevant rate
2736 * table entry.  return the index into in_ridx[].
2738 * The caller then uses that index back into in_ridx
2739 * to figure out the rate index programmed /into/
2740 * the firmware for this given node.
2743 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
/*
 * Linear scan of the node's programmed rate set for a matching rate.
 * NOTE(review): the match/return lines inside the loop are not visible
 * in this chunk.
 */
2749 	for (i = 0; i < nitems(in->in_ridx); i++) {
2750 		r = iwm_rates[in->in_ridx[i]].rate;
2754 	/* XXX Return the first */
2755 	/* XXX TODO: have it return the /lowest/ */
2760 * Fill in various bit for management frames, and leave them
2761 * unfilled for data frames (firmware takes care of that).
2762 * Return the selected TX rate.
2764 static const struct iwm_rate *
2765 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2766 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2768 	struct ieee80211com *ic = sc->sc_ic;
2769 	struct ieee80211_node *ni = &in->in_ni;
2770 	const struct iwm_rate *rinfo;
2771 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2772 	int ridx, rate_flags;
2774 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2775 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2778 	 * XXX TODO: everything about the rate selection here is terrible!
/*
 * Data frames: let firmware rate-scaling pick from the LQ table;
 * we only seed it with the current net80211 rate-control choice.
 */
2781 	if (type == IEEE80211_FC0_TYPE_DATA) {
2783 		/* for data frames, use RS table */
2784 		(void) ieee80211_ratectl_rate(ni, NULL, 0);
2785 		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2786 		ridx = in->in_ridx[i];
2788 		/* This is the index into the programmed table */
2789 		tx->initial_rate_index = i;
2790 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2791 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2792 		    "%s: start with i=%d, txrate %d\n",
2793 		    __func__, i, iwm_rates[ridx].rate);
2794 		/* XXX no rate_n_flags? */
2795 		return &iwm_rates[ridx];
2799 	 * For non-data, use the lowest supported rate for the given
2802 	 * Note: there may not be any rate control information available.
2803 	 * This driver currently assumes if we're transmitting data
2804 	 * frames, use the rate control table.  Grr.
2806 	 * XXX TODO: use the configured rate for the traffic type!
2808 	if (ic->ic_curmode == IEEE80211_MODE_11A) {
2810 		 * XXX this assumes the mode is either 11a or not 11a;
2811 		 * definitely won't work for 11n.
2813 		ridx = IWM_RIDX_OFDM;
2815 		ridx = IWM_RIDX_CCK;
2818 	rinfo = &iwm_rates[ridx];
2820 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2823 	    !! (IWM_RIDX_IS_CCK(ridx))
/* Non-data frames carry an explicit fixed rate in the TX command. */
2826 	/* XXX TODO: hard-coded TX antenna? */
2827 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2828 	if (IWM_RIDX_IS_CCK(ridx))
2829 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
2830 	/* XXX hard-coded tx rate */
2831 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
/*
 * Queue an 802.11 frame for transmission on TX ring 'ac'.
 * Builds the iwm_tx_cmd (rate, flags, station, copied 802.11 header),
 * DMA-maps the payload (linearizing the mbuf if it has too many
 * segments), fills the TFD descriptor, updates the scheduler byte-count
 * table, and rings the hardware write pointer.
 */
2838 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2840 	struct ieee80211com *ic = sc->sc_ic;
2841 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2842 	struct iwm_node *in = (struct iwm_node *)ni;
2843 	struct iwm_tx_ring *ring;
2844 	struct iwm_tx_data *data;
2845 	struct iwm_tfd *desc;
2846 	struct iwm_device_cmd *cmd;
2847 	struct iwm_tx_cmd *tx;
2848 	struct ieee80211_frame *wh;
2849 	struct ieee80211_key *k = NULL;
2851 	const struct iwm_rate *rinfo;
2854 	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
2857 	int i, totlen, error, pad;
2859 	wh = mtod(m, struct ieee80211_frame *);
2860 	hdrlen = ieee80211_anyhdrsize(wh);
2861 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2863 	ring = &sc->txq[ac];
2864 	desc = &ring->desc[ring->cur];
2865 	memset(desc, 0, sizeof(*desc));
2866 	data = &ring->data[ring->cur];
2868 	/* Fill out iwm_tx_cmd to send to the firmware */
2869 	cmd = &ring->cmd[ring->cur];
2870 	cmd->hdr.code = IWM_TX_CMD;
2872 	cmd->hdr.qid = ring->qid;
2873 	cmd->hdr.idx = ring->cur;
2875 	tx = (void *)cmd->data;
2876 	memset(tx, 0, sizeof(*tx));
/* Select TX rate and fill rate-related command fields. */
2878 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
2880 	/* Encrypt the frame if need be. */
2881 	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2882 		/* Retrieve key for TX && do software encryption. */
2883 		k = ieee80211_crypto_encap(ni, m);
2888 		/* 802.11 header may have moved. */
2889 		wh = mtod(m, struct ieee80211_frame *);
2892 	if (ieee80211_radiotap_active_vap(vap)) {
2893 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
2896 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2897 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2898 		tap->wt_rate = rinfo->rate;
2899 		tap->wt_hwqueue = ac;
2901 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2902 		ieee80211_radiotap_tx(vap, m);
2906 	totlen = m->m_pkthdr.len;
/* Unicast frames want an ACK from the peer. */
2909 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2910 		flags |= IWM_TX_CMD_FLG_ACK;
/* Request RTS/CTS protection for long unicast non-data frames. */
2913 	if (type != IEEE80211_FC0_TYPE_DATA
2914 	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
2915 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2916 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
/* Multicast/management traffic goes to the auxiliary station entry. */
2919 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2920 	    type != IEEE80211_FC0_TYPE_DATA)
2921 		tx->sta_id = sc->sc_aux_sta.sta_id;
2923 		tx->sta_id = IWM_STATION_ID;
/* Management frames get shorter power-save frame timeouts. */
2925 	if (type == IEEE80211_FC0_TYPE_MGT) {
2926 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2928 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2929 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2930 			tx->pm_frame_timeout = htole16(3);
2932 			tx->pm_frame_timeout = htole16(2);
2934 		tx->pm_frame_timeout = htole16(0);
2938 		/* First segment length must be a multiple of 4. */
2939 		flags |= IWM_TX_CMD_FLG_MH_PAD;
2940 		pad = 4 - (hdrlen & 3);
2944 	tx->driver_txop = 0;
2945 	tx->next_frame_len = 0;
2947 	tx->len = htole16(totlen);
2948 	tx->tid_tspec = tid;
2949 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
2951 	/* Set physical address of "scratch area". */
2952 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
2953 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
2955 	/* Copy 802.11 header in TX command. */
2956 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2958 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
2961 	tx->tx_flags |= htole32(flags);
2963 	/* Trim 802.11 header. */
/* DMA-map the payload; on EFBIG, linearize into one mbuf and retry. */
2965 #if defined(__DragonFly__)
2966 	error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
2967 	    segs, IWM_MAX_SCATTER - 1,
2968 	    &nsegs, BUS_DMA_NOWAIT);
2970 	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2971 	    segs, &nsegs, BUS_DMA_NOWAIT);
2974 		if (error != EFBIG) {
2975 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2980 		/* Too many DMA segments, linearize mbuf. */
2981 		MGETHDR(m1, M_NOWAIT, MT_DATA);
2986 		if (m->m_pkthdr.len > MHLEN) {
2987 			MCLGET(m1, M_NOWAIT);
2988 			if (!(m1->m_flags & M_EXT)) {
2994 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
2995 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
2998 #if defined(__DragonFly__)
2999 		error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map, m,
3000 		    segs, IWM_MAX_SCATTER - 1,
3001 		    &nsegs, BUS_DMA_NOWAIT);
3003 		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3004 		    segs, &nsegs, BUS_DMA_NOWAIT);
3007 			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3017 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3018 	    "sending txd %p, in %p\n", data, data->in);
3019 	KASSERT(data->in != NULL, ("node is NULL"));
3021 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3022 	    "sending data: qid=%d idx=%d len=%d nsegs=%d\n",
3023 	    ring->qid, ring->cur, totlen, nsegs);
3025 	/* Fill TX descriptor. */
/* TB0+TB1 carry the command/header; the rest carry payload segments. */
3026 	desc->num_tbs = 2 + nsegs;
3028 	desc->tbs[0].lo = htole32(data->cmd_paddr);
3029 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3031 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3032 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3033 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3034 	      + hdrlen + pad - TB0_SIZE) << 4);
3036 	/* Other DMA segments are for data payload. */
3037 	for (i = 0; i < nsegs; i++) {
3039 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3040 		desc->tbs[i+2].hi_n_len = \
3041 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3042 		    | ((seg->ds_len) << 4);
/* Flush payload, command and descriptor memory before the doorbell. */
3045 	bus_dmamap_sync(ring->data_dmat, data->map,
3046 	    BUS_DMASYNC_PREWRITE);
3047 	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3048 	    BUS_DMASYNC_PREWRITE);
3049 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3050 	    BUS_DMASYNC_PREWRITE);
3053 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
/* Advance the ring and tell hardware about the new write pointer. */
3057 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3058 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3060 	/* Mark TX ring as full if we reach a certain threshold. */
3061 	if (++ring->queued > IWM_TX_RING_HIMARK) {
3062 		sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 raw-transmit entry point.  Rejects frames while the interface
 * is down, then queues via iwm_tx() and arms the TX watchdog.
 * NOTE(review): 'params' is currently ignored — both branches call
 * iwm_tx() identically.
 */
3069 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3070 	const struct ieee80211_bpf_params *params)
3072 	struct ieee80211com *ic = ni->ni_ic;
3073 	struct ifnet *ifp = ic->ic_ifp;
3074 	struct iwm_softc *sc = ifp->if_softc;
3077 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3078 	    "->%s begin\n", __func__);
3080 #if defined(__DragonFly__)
3081 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
3083 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/* Interface not running: drop the frame and its node reference. */
3085 		ieee80211_free_node(ni);
3087 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3088 		    "<-%s not RUNNING\n", __func__);
3094 	if (params == NULL) {
3095 		error = iwm_tx(sc, m, ni, 0);
3097 		error = iwm_tx(sc, m, ni, 0);
3100 		/* NB: m is reclaimed on tx failure */
3101 		ieee80211_free_node(ni);
3102 #if defined(__DragonFly__)
3105 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* Arm the TX watchdog (seconds); cleared on TX completion. */
3108 	sc->sc_tx_timer = 5;
3120 * Note that there are transports that buffer frames before they reach
3121 * the firmware. This means that after flush_tx_path is called, the
3122 * queue might not be empty. The race-free way to handle this is to:
3123 * 1) set the station as draining
3124 * 2) flush the Tx path
3125 * 3) wait for the transport queues to be empty
/*
 * Ask the firmware to flush the TX queues in tfd_msk; 'sync' selects
 * whether the command completion is waited for.
 */
3128 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3130 	struct iwm_tx_path_flush_cmd flush_cmd = {
3131 		.queues_ctl = htole32(tfd_msk),
3132 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3136 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3137 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3138 	    sizeof(flush_cmd), &flush_cmd);
3140 		device_printf(sc->sc_dev,
3141 		    "Flushing tx queue failed: %d\n", ret);
/*
 * Convert a v6 ADD_STA command into the older v5 layout, copying only
 * the fields v5 knows about.  Used for firmware that lacks the
 * STA_KEY_CMD capability (see iwm_mvm_send_add_sta_cmd_status()).
 */
3151 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
3152 	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
3154 	memset(cmd_v5, 0, sizeof(*cmd_v5));
3156 	cmd_v5->add_modify = cmd_v6->add_modify;
3157 	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
3158 	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
3159 	IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
3160 	cmd_v5->sta_id = cmd_v6->sta_id;
3161 	cmd_v5->modify_mask = cmd_v6->modify_mask;
3162 	cmd_v5->station_flags = cmd_v6->station_flags;
3163 	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
3164 	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
3165 	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
3166 	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
3167 	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
3168 	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
3169 	cmd_v5->assoc_id = cmd_v6->assoc_id;
3170 	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
3171 	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
/*
 * Send an ADD_STA command and collect its status, using the v6 layout
 * when the firmware advertises STA_KEY_CMD, otherwise downgrading to v5.
 */
3175 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3176 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
3178 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
3180 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
3181 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
3182 		    sizeof(*cmd), cmd, status);
3185 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
3187 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
3191 /* send station add/update command to firmware */
/*
 * Build and submit the ADD_STA command for the BSS station.
 * 'update' selects modify (1) vs. add (0) semantics in the firmware.
 */
3193 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3195 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
3199 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3201 	add_sta_cmd.sta_id = IWM_STATION_ID;
3202 	add_sta_cmd.mac_id_n_color
3203 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3204 	        IWM_DEFAULT_COLOR));
/* Enable all four TX queues for this station. */
3206 	add_sta_cmd.tfd_queue_msk = htole32(0xf);
3207 	IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3209 	add_sta_cmd.add_modify = update ? 1 : 0;
3210 	add_sta_cmd.station_flags_msk
3211 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3213 	status = IWM_ADD_STA_SUCCESS;
3214 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
/* The command may succeed while the firmware rejects the station. */
3219 	case IWM_ADD_STA_SUCCESS:
3223 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the BSS station to the firmware (initial add, not an update). */
3231 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3235 	ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update an already-added BSS station in the firmware. */
3243 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3245 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an internal (driver-managed, e.g. auxiliary) station.
 * 'addr' may be NULL, in which case no MAC address is set.
 */
3249 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3250 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3252 	struct iwm_mvm_add_sta_cmd_v6 cmd;
3256 	memset(&cmd, 0, sizeof(cmd));
3257 	cmd.sta_id = sta->sta_id;
3258 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3260 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3263 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3265 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
/* Distinguish transport success from a firmware-level rejection. */
3270 	case IWM_ADD_STA_SUCCESS:
3271 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3272 		    "%s: Internal station added.\n", __func__);
3275 		device_printf(sc->sc_dev,
3276 		    "%s: Add internal station failed, status=0x%x\n",
/*
 * Add the auxiliary station (hard-coded id 3, no TX queues, no MAC
 * address) used for multicast/management TX; on failure the cached
 * sc_aux_sta state is cleared.
 */
3285 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3289 	sc->sc_aux_sta.sta_id = 3;
3290 	sc->sc_aux_sta.tfd_queue_msk = 0;
3292 	ret = iwm_mvm_add_int_sta_common(sc,
3293 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3296 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send the firmware time-quota command: divide
 * IWM_MVM_MAX_QUOTA evenly across all active bindings (currently at
 * most one, derived from in->in_phyctxt), giving the remainder to the
 * first binding.  'in' may be NULL when no station is active.
 */
3309 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3311 	struct iwm_time_quota_cmd cmd;
3312 	int i, idx, ret, num_active_macs, quota, quota_rem;
3313 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3314 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3317 	memset(&cmd, 0, sizeof(cmd));
3319 	/* currently, PHY ID == binding ID */
3321 		id = in->in_phyctxt->id;
3322 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3323 		colors[id] = in->in_phyctxt->color;
3330 	 * The FW's scheduling session consists of
3331 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3332 	 * equally between all the bindings that require quota
/* Count active interfaces while pre-marking all slots invalid. */
3334 	num_active_macs = 0;
3335 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3336 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3337 		num_active_macs += n_ifs[i];
3342 	if (num_active_macs) {
3343 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3344 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3347 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3351 		cmd.quotas[idx].id_and_color =
3352 		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3354 		if (n_ifs[i] <= 0) {
3355 			cmd.quotas[idx].quota = htole32(0);
3356 			cmd.quotas[idx].max_duration = htole32(0);
3358 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3359 			cmd.quotas[idx].max_duration = htole32(0);
3364 	/* Give the remainder of the session to the first binding */
3365 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3367 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3370 		device_printf(sc->sc_dev,
3371 		    "%s: Failed to send quota: %d\n", __func__, ret);
3380 * ieee80211 routines
3384 * Change to AUTH state in 80211 state machine. Roughly matches what
3385 * Linux does in bss_info_changed().
3388 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3390 struct ieee80211_node *ni;
3391 struct iwm_node *in;
3392 struct iwm_vap *iv = IWM_VAP(vap);
3394 uint32_t min_duration;
3398 * XXX i have a feeling that the vap node is being
3399 * freed from underneath us. Grr.
3401 ni = ieee80211_ref_node(vap->iv_bss);
3402 in = (struct iwm_node *) ni;
3403 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3404 "%s: called; vap=%p, bss ni=%p\n",
3411 error = iwm_allow_mcast(vap, sc);
3413 device_printf(sc->sc_dev,
3414 "%s: failed to set multicast\n", __func__);
3419 * This is where it deviates from what Linux does.
3421 * Linux iwlwifi doesn't reset the nic each time, nor does it
3422 * call ctxt_add() here. Instead, it adds it during vap creation,
3423 * and always does does a mac_ctx_changed().
3425 * The openbsd port doesn't attempt to do that - it reset things
3426 * at odd states and does the add here.
3428 * So, until the state handling is fixed (ie, we never reset
3429 * the NIC except for a firmware failure, which should drag
3430 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3431 * contexts that are required), let's do a dirty hack here.
3433 if (iv->is_uploaded) {
3434 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3435 device_printf(sc->sc_dev,
3436 "%s: failed to add MAC\n", __func__);
3440 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3441 device_printf(sc->sc_dev,
3442 "%s: failed to add MAC\n", __func__);
3447 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3448 in->in_ni.ni_chan, 1, 1)) != 0) {
3449 device_printf(sc->sc_dev,
3450 "%s: failed add phy ctxt\n", __func__);
3453 in->in_phyctxt = &sc->sc_phyctxt[0];
3455 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3456 device_printf(sc->sc_dev,
3457 "%s: binding cmd\n", __func__);
3461 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3462 device_printf(sc->sc_dev,
3463 "%s: failed to add MAC\n", __func__);
3467 /* a bit superfluous? */
3468 while (sc->sc_auth_prot) {
3469 #if defined(__DragonFly__)
3470 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmauth", 0);
3472 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
3475 sc->sc_auth_prot = 1;
3477 duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
3478 200 + in->in_ni.ni_intval);
3479 min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
3480 100 + in->in_ni.ni_intval);
3481 iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
3483 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3484 "%s: waiting for auth_prot\n", __func__);
3485 while (sc->sc_auth_prot != 2) {
3487 * well, meh, but if the kernel is sleeping for half a
3488 * second, we have bigger problems
3490 if (sc->sc_auth_prot == 0) {
3491 device_printf(sc->sc_dev,
3492 "%s: missed auth window!\n", __func__);
3495 } else if (sc->sc_auth_prot == -1) {
3496 device_printf(sc->sc_dev,
3497 "%s: no time event, denied!\n", __func__);
3498 sc->sc_auth_prot = 0;
3502 #if defined(__DragonFly__)
3503 iwmsleep(&sc->sc_auth_prot, &sc->sc_lk, 0, "iwmau2", 0);
3505 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
3508 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
3511 ieee80211_free_node(ni);
/*
 * Move to ASSOC state: refresh the firmware's view of the BSS station
 * and the MAC context.
 */
3516 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3518 	struct iwm_node *in = (struct iwm_node *)vap->iv_bss;
3521 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3522 		device_printf(sc->sc_dev,
3523 		    "%s: failed to update STA\n", __func__);
3528 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3529 		device_printf(sc->sc_dev,
3530 		    "%s: failed to update MAC\n", __func__);
/*
 * Tear down the RUN-state firmware context.  The "proper" incremental
 * teardown below is dead code (see the comment): in practice the whole
 * device is stopped and later re-initialized.
 */
3538 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3541 	 * Ok, so *technically* the proper set of calls for going
3542 	 * from RUN back to SCAN is:
3544 	 * iwm_mvm_power_mac_disable(sc, in);
3545 	 * iwm_mvm_mac_ctxt_changed(sc, in);
3546 	 * iwm_mvm_rm_sta(sc, in);
3547 	 * iwm_mvm_update_quotas(sc, NULL);
3548 	 * iwm_mvm_mac_ctxt_changed(sc, in);
3549 	 * iwm_mvm_binding_remove_vif(sc, in);
3550 	 * iwm_mvm_mac_ctxt_remove(sc, in);
3552 	 * However, that freezes the device not matter which permutations
3553 	 * and modifications are attempted.  Obviously, this driver is missing
3554 	 * something since it works in the Linux driver, but figuring out what
3555 	 * is missing is a little more complicated.  Now, since we're going
3556 	 * back to nothing anyway, we'll just do a complete device reset.
3557 	 * Up your's, device!
3559 	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
3560 	iwm_stop_device(sc);
/* Everything below this point is the unused incremental teardown. */
3569 	iwm_mvm_power_mac_disable(sc, in);
3571 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3572 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3576 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3577 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3580 	error = iwm_mvm_rm_sta(sc, in);
3582 	iwm_mvm_update_quotas(sc, NULL);
3583 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3584 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3587 	iwm_mvm_binding_remove_vif(sc, in);
3589 	iwm_mvm_mac_ctxt_remove(sc, in);
/*
 * net80211 node allocator: returns a zeroed iwm_node (which embeds the
 * generic ieee80211_node as its first member).
 */
3595 static struct ieee80211_node *
3596 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3598 	return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
3599 	    M_INTWAIT | M_ZERO);
/*
 * Build the node's link-quality (rate selection) command for the
 * firmware: map the node's 802.11 rate set to hardware rate indices
 * (in_ridx[]), then fill lq->rs_table highest-rate-first, padding the
 * tail with the lowest rate.  Not 11n-aware.
 */
3603 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3605 	struct ieee80211_node *ni = &in->in_ni;
3606 	struct iwm_lq_cmd *lq = &in->in_lq;
3607 	int nrates = ni->ni_rates.rs_nrates;
3608 	int i, ridx, tab = 0;
3611 	if (nrates > nitems(lq->rs_table)) {
3612 		device_printf(sc->sc_dev,
3613 		    "%s: node supports %d rates, driver handles "
3614 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3619 	 * XXX .. and most of iwm_node is not initialised explicitly;
3620 	 * it's all just 0x0 passed to the firmware.
3623 	/* first figure out which rates we should support */
3624 	/* XXX TODO: this isn't 11n aware /at all/ */
3625 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3626 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3627 	    "%s: nrates=%d\n", __func__, nrates);
3628 	for (i = 0; i < nrates; i++) {
3629 		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
3631 		/* Map 802.11 rate to HW rate index. */
3632 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
3633 			if (iwm_rates[ridx].rate == rate)
/* No hardware entry matched this 802.11 rate. */
3635 		if (ridx > IWM_RIDX_MAX) {
3636 			device_printf(sc->sc_dev,
3637 			    "%s: WARNING: device rate for %d not found!\n",
3640 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3641 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
3646 			in->in_ridx[i] = ridx;
3650 	/* then construct a lq_cmd based on those */
3651 	memset(lq, 0, sizeof(*lq));
3652 	lq->sta_id = IWM_STATION_ID;
3655 	 * are these used? (we don't do SISO or MIMO)
3656 	 * need to set them to non-zero, though, or we get an error.
3658 	lq->single_stream_ant_msk = 1;
3659 	lq->dual_stream_ant_msk = 1;
3662 	 * Build the actual rate selection table.
3663 	 * The lowest bits are the rates.  Additionally,
3664 	 * CCK needs bit 9 to be set.  The rest of the bits
3665 	 * we add to the table select the tx antenna
3666 	 * Note that we add the rates in the highest rate first
3667 	 * (opposite of ni_rates).
3670 	 * XXX TODO: this should be looping over the min of nrates
3671 	 * and LQ_MAX_RETRY_NUM.  Sigh.
3673 	for (i = 0; i < nrates; i++) {
3677 		txant = IWM_FW_VALID_TX_ANT(sc);
/* Use the first valid TX antenna. */
3678 		nextant = 1<<(ffs(txant)-1);
3682 		 * Map the rate id into a rate index into
3683 		 * our hardware table containing the
3684 		 * configuration to use for this rate.
3686 		ridx = in->in_ridx[(nrates-1)-i];
3687 		tab = iwm_rates[ridx].plcp;
3688 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
3689 		if (IWM_RIDX_IS_CCK(ridx))
3690 			tab |= IWM_RATE_MCS_CCK_MSK;
3691 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3692 		    "station rate i=%d, rate=%d, hw=%x\n",
3693 		    i, iwm_rates[ridx].rate, tab);
3694 		lq->rs_table[i] = htole32(tab);
3696 	/* then fill the rest with the lowest possible rate */
3697 	for (i = nrates; i < nitems(lq->rs_table); i++) {
3698 		KASSERT(tab != 0, ("invalid tab"));
3699 		lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: apply the media change and, when the
 * interface is up and running, restart it so the change takes effect.
 */
3704 iwm_media_change(struct ifnet *ifp)
3706 	struct iwm_softc *sc = ifp->if_softc;
3709 	error = ieee80211_media_change(ifp);
/* ENETRESET means "accepted, but interface restart required". */
3710 	if (error != ENETRESET)
3713 #if defined(__DragonFly__)
3714 	if ((ifp->if_flags & IFF_UP) &&
3715 	    (ifp->if_flags & IFF_RUNNING)) {
3717 	if ((ifp->if_flags & IFF_UP) &&
3718 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/*
 * iwm_newstate - net80211 state-machine hook for the vap.
 * Because the firmware cannot go RUN->SCAN directly, any transition into
 * SCAN/AUTH/ASSOC is first forced through INIT.  On entering RUN the
 * association state, power mode, beacon filter, quotas and link-quality
 * (rate) table are pushed to the firmware.  Finally chains to the saved
 * net80211 handler ivp->iv_newstate.
 * NOTE(review): sampled listing — braces/returns between some lines are elided.
 */
3728 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
3730 	struct iwm_vap *ivp = IWM_VAP(vap);
3731 	struct ieee80211com *ic = vap->iv_ic;
3732 	struct iwm_softc *sc = ic->ic_ifp->if_softc;
3733 	struct iwm_node *in;
3736 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3737 	    "switching state %s -> %s\n",
3738 	    ieee80211_state_name[vap->iv_state],
3739 	    ieee80211_state_name[nstate]);
3740 	IEEE80211_UNLOCK(ic);
3742 	/* disable beacon filtering if we're hopping out of RUN */
3743 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
3744 		iwm_mvm_disable_beacon_filter(sc);
3746 		if (((in = (void *)vap->iv_bss) != NULL))
3749 			iwm_release(sc, NULL);
3752 	 * It's impossible to directly go RUN->SCAN. If we iwm_release()
3753 	 * above then the card will be completely reinitialized,
3754 	 * so the driver must do everything necessary to bring the card
3755 	 * from INIT to SCAN.
3757 	 * Additionally, upon receiving deauth frame from AP,
3758 	 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
3759 	 * state. This will also fail with this driver, so bring the FSM
3760 	 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
3762 	 * XXX TODO: fix this for FreeBSD!
3764 	if (nstate == IEEE80211_S_SCAN ||
3765 	    nstate == IEEE80211_S_AUTH ||
3766 	    nstate == IEEE80211_S_ASSOC) {
3767 		IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3768 		    "Force transition to INIT; MGT=%d\n", arg);
/* Run the INIT transition through net80211's own handler first. */
3771 		vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
3772 		IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3773 		    "Going INIT->SCAN\n");
3774 		nstate = IEEE80211_S_SCAN;
3775 		IEEE80211_UNLOCK(ic);
3781 	case IEEE80211_S_INIT:
3782 		sc->sc_scanband = 0;
3785 	case IEEE80211_S_AUTH:
3786 		if ((error = iwm_auth(vap, sc)) != 0) {
3787 			device_printf(sc->sc_dev,
3788 			    "%s: could not move to auth state: %d\n",
3794 	case IEEE80211_S_ASSOC:
3795 		if ((error = iwm_assoc(vap, sc)) != 0) {
3796 			device_printf(sc->sc_dev,
3797 			    "%s: failed to associate: %d\n", __func__,
3803 	case IEEE80211_S_RUN:
3805 		struct iwm_host_cmd cmd = {
3807 			.len = { sizeof(in->in_lq), },
3808 			.flags = IWM_CMD_SYNC,
3811 		/* Update the association state, now we have it all */
3812 		/* (eg associd comes in at this point */
3813 		error = iwm_assoc(vap, sc);
3815 			device_printf(sc->sc_dev,
3816 			    "%s: failed to update association state: %d\n",
3822 		in = (struct iwm_node *)vap->iv_bss;
3823 		iwm_mvm_power_mac_update_mode(sc, in);
3824 		iwm_mvm_enable_beacon_filter(sc, in);
3825 		iwm_mvm_update_quotas(sc, in);
3826 		iwm_setrates(sc, in);
/* Push the freshly computed link-quality (rate) table to firmware. */
3828 		cmd.data[0] = &in->in_lq;
3829 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
3830 			device_printf(sc->sc_dev,
3831 			    "%s: IWM_LQ_CMD failed\n", __func__);
3843 	return (ivp->iv_newstate(vap, nstate, arg));
/*
 * iwm_endscan_cb - taskqueue callback run when a firmware scan completes.
 * If only the 2 GHz band was scanned and the NVM says 5 GHz is supported,
 * kick off the 5 GHz pass; otherwise report scan completion to net80211
 * and reset sc_scanband.
 */
3847 iwm_endscan_cb(void *arg, int pending)
3849 	struct iwm_softc *sc = arg;
3850 	struct ieee80211com *ic = sc->sc_ic;
3854 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
3859 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
3860 	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
3862 		if ((error = iwm_mvm_scan_request(sc,
3863 		    IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
3864 			device_printf(sc->sc_dev, "could not initiate scan\n");
3873 		ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
3875 		sc->sc_scanband = 0;
/*
 * iwm_init_hw - full firmware bring-up sequence:
 *   start hw -> run INIT ucode -> stop/start hw -> load REGULAR ucode ->
 *   antenna config -> phy db -> phy cfg -> aux station -> phy contexts ->
 *   device power update -> activate TX queues.
 * On any error the elided label path stops the device.
 */
3881 iwm_init_hw(struct iwm_softc *sc)
3883 	struct ieee80211com *ic = sc->sc_ic;
3886 	if ((error = iwm_start_hw(sc)) != 0) {
3887 		kprintf("iwm_start_hw: failed %d\n", error);
3891 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
3892 		kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
3897 	 * should stop and start HW since that INIT
3900 	iwm_stop_device(sc);
3901 	if ((error = iwm_start_hw(sc)) != 0) {
3902 		device_printf(sc->sc_dev, "could not initialize hardware\n");
3906 	/* restart, this time with the regular firmware */
3907 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
3909 		device_printf(sc->sc_dev, "could not load firmware\n");
3913 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0) {
3914 		device_printf(sc->sc_dev, "antenna config failed\n");
3918 	/* Send phy db control command and then phy db calibration */
3919 	if ((error = iwm_send_phy_db_data(sc)) != 0) {
3920 		device_printf(sc->sc_dev, "phy_db_data failed\n");
3924 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
3925 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
3929 	/* Add auxiliary station for scanning */
3930 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
3931 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
3935 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
3937 		 * The channel used here isn't relevant as it's
3938 		 * going to be overwritten in the other flows.
3939 		 * For now use the first channel we have.
3941 		if ((error = iwm_mvm_phy_ctxt_add(sc,
3942 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
3946 	error = iwm_mvm_power_update_device(sc);
3950 	/* Mark TX rings as active. */
3951 	for (qid = 0; qid < 4; qid++) {
3952 		iwm_enable_txq(sc, qid, qid);
3958 	iwm_stop_device(sc);
3962 /* Allow multicast from our BSSID. */
/*
 * iwm_allow_mcast - build and send an IWM_MCAST_FILTER_CMD that passes
 * frames from the vap's own BSSID (filter_own = 1).  The command buffer
 * is sized to a 4-byte multiple, sent synchronously, then freed.
 */
3964 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3966 	struct ieee80211_node *ni = vap->iv_bss;
3967 	struct iwm_mcast_filter_cmd *cmd;
3971 	size = roundup(sizeof(*cmd), 4);
3972 	cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
3975 	cmd->filter_own = 1;
3979 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3981 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3982 	    IWM_CMD_SYNC, size, cmd);
3983 	kfree(cmd, M_DEVBUF);
/*
 * Presumably the body of iwm_init() (if_init entry point) — the function
 * header line is elided from this listing; it takes the softc as its void*
 * argument and defers to iwm_init_locked() under the driver lock.
 * TODO(review): confirm against the full source.
 */
3995 	struct iwm_softc *sc = arg;
3998 	iwm_init_locked(sc);
/*
 * iwm_init_locked - bring the interface up (caller holds the driver lock).
 * No-op if already initialized; otherwise bumps sc_generation, clears
 * STOPPED, runs the full iwm_init_hw() sequence, marks the ifnet running
 * and arms the 1 Hz watchdog callout.
 */
4003 iwm_init_locked(struct iwm_softc *sc)
4005 	struct ifnet *ifp = sc->sc_ifp;
4008 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
/* Generation counter invalidates commands issued before this (re)init. */
4011 	sc->sc_generation++;
4012 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4014 	if ((error = iwm_init_hw(sc)) != 0) {
4015 		kprintf("iwm_init_hw failed %d\n", error);
4016 		iwm_stop_locked(ifp);
4021 	 * Ok, firmware loaded and we are jogging
4023 #if defined(__DragonFly__)
4024 	ifq_clr_oactive(&ifp->if_snd);
4025 	ifp->if_flags |= IFF_RUNNING;
4027 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4028 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
4030 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4031 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4035  * Dequeue packets from sendq and call send.
/* if_start entry point; signature differs between DragonFly and FreeBSD. */
4038 #if defined(__DragonFly__)
4040 iwm_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4043 iwm_start(struct ifnet *ifp)
/* Grabs the driver lock (elided) and defers to iwm_start_locked(). */
4046 	struct iwm_softc *sc = ifp->if_softc;
4049 	iwm_start_locked(ifp);
/*
 * iwm_start_locked - drain the interface send queue (lock held).
 * Bails out unless the interface is RUNNING and not OACTIVE; sets OACTIVE
 * when any TX queue is full (sc->qfullmsk), otherwise dequeues mbufs and
 * hands them to iwm_tx(), counting failures as output errors.  Arms a
 * 15-second TX watchdog when the interface is up.
 */
4054 iwm_start_locked(struct ifnet *ifp)
4056 	struct iwm_softc *sc = ifp->if_softc;
4057 	struct ieee80211_node *ni;
4061 #if defined(__DragonFly__)
4062 	if ((ifp->if_flags & IFF_RUNNING) == 0)
4063 		ifq_purge(&ifp->if_snd);
4064 	if (ifq_is_oactive(&ifp->if_snd) || (ifp->if_flags & IFF_RUNNING) == 0)
4067 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
4071 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4073 	/* why isn't this done per-queue? */
4074 	if (sc->qfullmsk != 0) {
4075 #if defined(__DragonFly__)
4076 		ifq_set_oactive(&ifp->if_snd);
4078 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
4082 		m = ifq_dequeue(&ifp->if_snd);
/* The destination node was stashed in the mbuf by net80211. */
4085 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4086 		if (iwm_tx(sc, m, ni, ac) != 0) {
4087 			ieee80211_free_node(ni);
4088 #if defined(__DragonFly__)
4091 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4096 	if (ifp->if_flags & IFF_UP) {
4097 		sc->sc_tx_timer = 15;
4100 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/* iwm_stop - public stop entry; takes the lock (elided) and defers. */
4104 iwm_stop(struct ifnet *ifp, int disable)
4106 	struct iwm_softc *sc = ifp->if_softc;
4109 	iwm_stop_locked(ifp);
/*
 * iwm_stop_locked - tear the interface down (lock held): clear HW_INITED,
 * set STOPPED, bump sc_generation, reset scan/auth state and the TX
 * watchdog, clear RUNNING/OACTIVE, then power the device off.
 */
4114 iwm_stop_locked(struct ifnet *ifp)
4116 	struct iwm_softc *sc = ifp->if_softc;
4118 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4119 	sc->sc_flags |= IWM_FLAG_STOPPED;
4120 	sc->sc_generation++;
4121 	sc->sc_scanband = 0;
4122 	sc->sc_auth_prot = 0;
4123 #if defined(__DragonFly__)
4124 	ifq_clr_oactive(&ifp->if_snd);
4125 	ifp->if_flags &= ~IFF_RUNNING;
4127 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4129 	sc->sc_tx_timer = 0;
4130 	iwm_stop_device(sc);
/*
 * iwm_watchdog - 1 Hz callout armed by iwm_init_locked().
 * Decrements the TX watchdog timer; on expiry reports a device timeout,
 * downs and stops the interface and counts an output error.  Otherwise
 * re-arms itself for another second.
 */
4134 iwm_watchdog(void *arg)
4136 	struct iwm_softc *sc = arg;
4137 	struct ifnet *ifp = sc->sc_ifp;
4139 #if defined(__DragonFly__)
4141 	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
4143 	if (sc->sc_tx_timer > 0) {
4144 		if (--sc->sc_tx_timer == 0) {
4145 			device_printf(sc->sc_dev, "device timeout\n");
4149 			ifp->if_flags &= ~IFF_UP;
4150 			iwm_stop_locked(ifp);
4151 #if defined(__DragonFly__)
4154 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
4159 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * iwm_ioctl - interface ioctl handler (signature differs per platform).
 * Visible cases: ether ioctls fall through to ether_ioctl(); media ioctls
 * to ifmedia_ioctl(); the up/down case inits or stops the interface based
 * on IFF_UP vs RUNNING, and a successful bring-up restarts all vaps.
 */
4162 #if defined(__DragonFly__)
4164 iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data, struct ucred *cred)
4167 iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
4170 	struct iwm_softc *sc = ifp->if_softc;
4171 	struct ieee80211com *ic = sc->sc_ic;
4172 	struct ifreq *ifr = (struct ifreq *) data;
4173 	int error = 0, startall = 0;
4177 		error = ether_ioctl(ifp, cmd, data);
4180 		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4184 #if defined(__DragonFly__)
4185 		if (ifp->if_flags & IFF_UP) {
4186 			if (!(ifp->if_flags & IFF_RUNNING)) {
4187 				iwm_init_locked(sc);
4191 			if (ifp->if_flags & IFF_RUNNING)
4192 				iwm_stop_locked(ifp);
4195 		if (ifp->if_flags & IFF_UP) {
4196 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4197 				iwm_init_locked(sc);
4201 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4202 				iwm_stop_locked(ifp);
4207 		ieee80211_start_all(ic);
4219 * The interrupt side of things
4223 * error dumping routines are from iwlwifi/mvm/utils.c
4227  * Note: This structure is read from the device with IO accesses,
4228  * and the reading already does the endian conversion. As it is
4229  * read with uint32_t-sized accesses, any members with a different size
4230  * need to be ordered correctly though!
/*
 * Firmware error-event table layout, mirrored from the iwlwifi driver.
 * iwm_nic_error() reads this straight out of device memory and dumps it.
 */
4232 struct iwm_error_event_table {
4233 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4234 	uint32_t error_id;	/* type of error */
4235 	uint32_t pc;		/* program counter */
4236 	uint32_t blink1;	/* branch link */
4237 	uint32_t blink2;	/* branch link */
4238 	uint32_t ilink1;	/* interrupt link */
4239 	uint32_t ilink2;	/* interrupt link */
4240 	uint32_t data1;		/* error-specific data */
4241 	uint32_t data2;		/* error-specific data */
4242 	uint32_t data3;		/* error-specific data */
4243 	uint32_t bcon_time;	/* beacon timer */
4244 	uint32_t tsf_low;	/* network timestamp function timer */
4245 	uint32_t tsf_hi;	/* network timestamp function timer */
4246 	uint32_t gp1;		/* GP1 timer register */
4247 	uint32_t gp2;		/* GP2 timer register */
4248 	uint32_t gp3;		/* GP3 timer register */
4249 	uint32_t ucode_ver;	/* uCode version */
4250 	uint32_t hw_ver;	/* HW Silicon version */
4251 	uint32_t brd_ver;	/* HW board version */
4252 	uint32_t log_pc;	/* log program counter */
4253 	uint32_t frame_ptr;	/* frame pointer */
4254 	uint32_t stack_ptr;	/* stack pointer */
4255 	uint32_t hcmd;		/* last host command header */
4256 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
4258 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
4260 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
4262 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
4264 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
4266 	uint32_t isr_pref;	/* isr status register LMPM_NIC_PREF_STAT */
4267 	uint32_t wait_event;	/* wait event() caller address */
4268 	uint32_t l2p_control;	/* L2pControlField */
4269 	uint32_t l2p_duration;	/* L2pDurationField */
4270 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
4271 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
4272 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
4274 	uint32_t u_timestamp;	/* indicate when the date and time of the
4276 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
/* Offsets used when walking the in-device error log entries. */
4279 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4280 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
/*
 * Table mapping firmware error_id values to human-readable names.
 * The final ADVANCED_SYSASSERT entry (num == 0) is the catch-all.
 */
4286 } advanced_lookup[] = {
4287 	{ "NMI_INTERRUPT_WDG", 0x34 },
4288 	{ "SYSASSERT", 0x35 },
4289 	{ "UCODE_VERSION_MISMATCH", 0x37 },
4290 	{ "BAD_COMMAND", 0x38 },
4291 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4292 	{ "FATAL_ERROR", 0x3D },
4293 	{ "NMI_TRM_HW_ERR", 0x46 },
4294 	{ "NMI_INTERRUPT_TRM", 0x4C },
4295 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4296 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4297 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4298 	{ "NMI_INTERRUPT_HOST", 0x66 },
4299 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
4300 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
4301 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4302 	{ "ADVANCED_SYSASSERT", 0 },
/*
 * iwm_desc_lookup - translate a firmware error_id to its name.
 * Searches all but the last table entry; falls back to the catch-all.
 */
4306 iwm_desc_lookup(uint32_t num)
4310 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4311 		if (advanced_lookup[i].num == num)
4312 			return advanced_lookup[i].name;
4314 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4315 	return advanced_lookup[i].name;
4319  * Support for dumping the error log seemed like a good idea ...
4320  * but it's mostly hex junk and the only sensible thing is the
4321  * hw/ucode revision (which we know anyway). Since it's here,
4322  * I'll just leave it in, just in case e.g. the Intel guys want to
4323  * help us decipher some "ADVANCED_SYSASSERT" later.
/*
 * iwm_nic_error - read the firmware's error-event table from device
 * memory (pointer captured from the ALIVE response) and print each field.
 * Sanity-checks that the table pointer lies in [0x800000, 0x80C000).
 */
4326 iwm_nic_error(struct iwm_softc *sc)
4328 	struct iwm_error_event_table table;
4331 	device_printf(sc->sc_dev, "dumping device error log\n");
4332 	base = sc->sc_uc.uc_error_event_table;
4333 	if (base < 0x800000 || base >= 0x80C000) {
4334 		device_printf(sc->sc_dev,
4335 		    "Not valid error log pointer 0x%08x\n", base);
/* NOTE(review): size is passed in 32-bit words here — confirm iwm_read_mem's unit. */
4339 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
4340 		device_printf(sc->sc_dev, "reading errlog failed\n");
4345 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
4349 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4350 		device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
4351 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4352 		    sc->sc_flags, table.valid);
4355 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4356 	    iwm_desc_lookup(table.error_id));
4357 	device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
4358 	device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
4359 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4360 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4361 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4362 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4363 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4364 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4365 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4366 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4367 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4368 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4369 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4370 	device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
4371 	device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
4372 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4373 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4374 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4375 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4376 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4377 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4378 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4379 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4380 	device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
4381 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4382 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4383 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4384 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4385 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4386 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4387 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4388 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
/*
 * Helpers for iwm_notif_intr(): sync the RX buffer DMA map for CPU reads,
 * then point at the payload that immediately follows the packet header.
 */
4392 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
4394 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4395 	_var_ = (void *)((_pkt_)+1);					\
4396 } while (/*CONSTCOND*/0)
4398 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
4400 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4401 	_ptr_ = (void *)((_pkt_)+1);					\
4402 } while (/*CONSTCOND*/0)
/* Advance the RX ring consumer index, wrapping at IWM_RX_RING_COUNT. */
4404 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4407  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4408  * Basic structure from if_iwn
/*
 * iwm_notif_intr - drain the RX ring up to the hardware write pointer,
 * dispatching each firmware notification/response by pkt->hdr.code, then
 * acknowledge consumed buffers back to the firmware (aligned down to 8).
 */
4411 iwm_notif_intr(struct iwm_softc *sc)
4415 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
4416 	    BUS_DMASYNC_POSTREAD);
/* closed_rb_num is the firmware's RX write pointer (12 bits). */
4418 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
4423 	while (sc->rxq.cur != hw) {
4424 		struct iwm_rx_ring *ring = &sc->rxq;
4425 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
4426 		struct iwm_rx_packet *pkt;
4427 		struct iwm_cmd_response *cresp;
4430 		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4431 		    BUS_DMASYNC_POSTREAD);
4432 		pkt = mtod(data->m, struct iwm_rx_packet *);
/* Bit 7 of qid marks firmware-originated packets; strip it for the index. */
4434 		qid = pkt->hdr.qid & ~0x80;
4437 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4438 		    "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
4439 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
4440 		    pkt->hdr.code, sc->rxq.cur, hw);
4443 		 * randomly get these from the firmware, no idea why.
4444 		 * they at least seem harmless, so just ignore them for now
4446 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
4447 		    || pkt->len_n_flags == htole32(0x55550000))) {
4452 		switch (pkt->hdr.code) {
4453 		case IWM_REPLY_RX_PHY_CMD:
4454 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
4457 		case IWM_REPLY_RX_MPDU_CMD:
4458 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
4462 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
4465 		case IWM_MISSED_BEACONS_NOTIFICATION: {
4466 			struct iwm_missed_beacons_notif *resp;
4469 			/* XXX look at mac_id to determine interface ID */
4470 			struct ieee80211com *ic = sc->sc_ic;
4471 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4473 			SYNC_RESP_STRUCT(resp, pkt);
4474 			missed = le32toh(resp->consec_missed_beacons);
4476 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
4477 			    "%s: MISSED_BEACON: mac_id=%d, "
4478 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
4481 			    le32toh(resp->mac_id),
4482 			    le32toh(resp->consec_missed_beacons_since_last_rx),
4483 			    le32toh(resp->consec_missed_beacons),
4484 			    le32toh(resp->num_expected_beacons),
4485 			    le32toh(resp->num_recvd_beacons));
4491 			/* XXX no net80211 locking? */
4492 			if (vap->iv_state == IEEE80211_S_RUN &&
4493 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
4494 				if (missed > vap->iv_bmissthreshold) {
4495 					/* XXX bad locking; turn into task */
4497 					ieee80211_beacon_miss(ic);
4504 		case IWM_MVM_ALIVE: {
4505 			struct iwm_mvm_alive_resp *resp;
4506 			SYNC_RESP_STRUCT(resp, pkt);
/* Capture firmware pointers (error log, event log, scheduler base). */
4508 			sc->sc_uc.uc_error_event_table
4509 			    = le32toh(resp->error_event_table_ptr);
4510 			sc->sc_uc.uc_log_event_table
4511 			    = le32toh(resp->log_event_table_ptr);
4512 			sc->sched_base = le32toh(resp->scd_base_ptr);
4513 			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
4515 			sc->sc_uc.uc_intr = 1;
4519 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
4520 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
4521 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
4523 			iwm_phy_db_set_section(sc, phy_db_notif);
4527 		case IWM_STATISTICS_NOTIFICATION: {
4528 			struct iwm_notif_statistics *stats;
4529 			SYNC_RESP_STRUCT(stats, pkt);
4530 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
4531 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
4534 		case IWM_NVM_ACCESS_CMD:
/* A synchronous command waiter is identified by (qid << 16) | idx. */
4535 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
4536 				bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4537 				    BUS_DMASYNC_POSTREAD);
4538 				memcpy(sc->sc_cmd_resp,
4539 				    pkt, sizeof(sc->sc_cmd_resp));
4543 		case IWM_PHY_CONFIGURATION_CMD:
4544 		case IWM_TX_ANT_CONFIGURATION_CMD:
4546 		case IWM_MAC_CONTEXT_CMD:
4547 		case IWM_REPLY_SF_CFG_CMD:
4548 		case IWM_POWER_TABLE_CMD:
4549 		case IWM_PHY_CONTEXT_CMD:
4550 		case IWM_BINDING_CONTEXT_CMD:
4551 		case IWM_TIME_EVENT_CMD:
4552 		case IWM_SCAN_REQUEST_CMD:
4553 		case IWM_REPLY_BEACON_FILTERING_CMD:
4554 		case IWM_MAC_PM_POWER_TABLE:
4555 		case IWM_TIME_QUOTA_CMD:
4556 		case IWM_REMOVE_STA:
4557 		case IWM_TXPATH_FLUSH:
4559 			SYNC_RESP_STRUCT(cresp, pkt);
4560 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
4561 				memcpy(sc->sc_cmd_resp,
4562 				    pkt, sizeof(*pkt)+sizeof(*cresp));
4567 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
4570 		case IWM_INIT_COMPLETE_NOTIF:
4571 			sc->sc_init_complete = 1;
4572 			wakeup(&sc->sc_init_complete);
4575 		case IWM_SCAN_COMPLETE_NOTIFICATION: {
4576 			struct iwm_scan_complete_notif *notif;
4577 			SYNC_RESP_STRUCT(notif, pkt);
/* Defer end-of-scan processing to the taskqueue (iwm_endscan_cb). */
4578 			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
4581 		case IWM_REPLY_ERROR: {
4582 			struct iwm_error_resp *resp;
4583 			SYNC_RESP_STRUCT(resp, pkt);
4585 			device_printf(sc->sc_dev,
4586 			    "firmware error 0x%x, cmd 0x%x\n",
4587 			    le32toh(resp->error_type),
4591 		case IWM_TIME_EVENT_NOTIFICATION: {
4592 			struct iwm_time_event_notif *notif;
4593 			SYNC_RESP_STRUCT(notif, pkt);
4595 			if (notif->status) {
4596 				if (le32toh(notif->action) &
4597 				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
4598 					sc->sc_auth_prot = 2;
4600 					sc->sc_auth_prot = 0;
4602 				sc->sc_auth_prot = -1;
4604 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4605 			    "%s: time event notification auth_prot=%d\n",
4606 				__func__, sc->sc_auth_prot);
4608 			wakeup(&sc->sc_auth_prot);
4611 		case IWM_MCAST_FILTER_CMD:
4615 			device_printf(sc->sc_dev,
4616 			    "cmd %04x frame %d/%d %x UNHANDLED (this should "
4618 			    pkt->hdr.code, qid, idx,
4620 			panic("unhandled command");
4625 		 * Why test bit 0x80? The Linux driver:
4627 		 * There is one exception: uCode sets bit 15 when it
4628 		 * originates the response/notification, i.e. when the
4629 		 * response/notification is not a direct response to a
4630 		 * command sent by the driver. For example, uCode issues
4631 		 * IWM_REPLY_RX when it sends a received frame to the driver;
4632 		 * it is not a direct response to any driver command.
4634 		 * Ok, so since when is 7 == 15? Well, the Linux driver
4635 		 * uses a slightly different format for pkt->hdr, and "qid"
4636 		 * is actually the upper byte of a two-byte field.
4638 		if (!(pkt->hdr.qid & (1 << 7))) {
4639 			iwm_cmd_done(sc, pkt);
4645 		IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
4646 		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4649 	 * Tell the firmware what we have processed.
4650 	 * Seems like the hardware gets upset unless we align
4653 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
4654 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
/*
 * Interrupt handler body (iwm_intr — the function header line is elided
 * from this listing).  Masks interrupts, reads causes either from the ICT
 * table or the INT CSR, and handles: fatal SW/HW errors (dump state, down
 * the interface), firmware-chunk-done, rfkill, periodic-RX, and RX
 * notifications; finally re-enables interrupts (elided label path).
 */
4660 	struct iwm_softc *sc = arg;
4661 	struct ifnet *ifp = sc->sc_ifp;
4666 #if defined(__DragonFly__)
/* Guard against interrupts arriving after detach on DragonFly. */
4667 	if (sc->sc_mem == NULL) {
4668 		kprintf("iwm_intr: detached\n");
4673 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
4675 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
4676 		uint32_t *ict = sc->ict_dma.vaddr;
4679 		tmp = htole32(ict[sc->ict_cur]);
4684 		 * ok, there was something. keep plowing until we have all.
4689 			ict[sc->ict_cur] = 0;
4690 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
4691 			tmp = htole32(ict[sc->ict_cur]);
4694 		/* this is where the fun begins. don't ask */
4695 		if (r1 == 0xffffffff)
4698 		/* i am not expected to understand this */
4701 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
4703 		r1 = IWM_READ(sc, IWM_CSR_INT);
4704 		/* "hardware gone" (where, fishing?) */
4705 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
4707 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
4709 	if (r1 == 0 && r2 == 0) {
/* Acknowledge the causes we are about to handle. */
4713 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
4716 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
4718 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
4721 		struct ieee80211com *ic = sc->sc_ic;
4722 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4726 		/* Dump driver status (TX and RX rings) while we're here. */
4727 		device_printf(sc->sc_dev, "driver status:\n");
4728 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
4729 			struct iwm_tx_ring *ring = &sc->txq[i];
4730 			device_printf(sc->sc_dev,
4731 			    "  tx ring %2d: qid=%-2d cur=%-3d "
4733 			    i, ring->qid, ring->cur, ring->queued);
4735 		device_printf(sc->sc_dev,
4736 		    "  rx ring: cur=%d\n", sc->rxq.cur);
4737 		device_printf(sc->sc_dev,
4738 		    "  802.11 state %d\n", vap->iv_state);
4741 		device_printf(sc->sc_dev, "fatal firmware error\n");
4742 		ifp->if_flags &= ~IFF_UP;
4743 		iwm_stop_locked(ifp);
4749 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
4750 		handled |= IWM_CSR_INT_BIT_HW_ERR;
4751 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
4752 		ifp->if_flags &= ~IFF_UP;
4753 		iwm_stop_locked(ifp);
4758 	/* firmware chunk loaded */
4759 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
4760 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
4761 		handled |= IWM_CSR_INT_BIT_FH_TX;
/* Wake the firmware-load path waiting for this DMA chunk. */
4762 		sc->sc_fw_chunk_done = 1;
4766 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
4767 		handled |= IWM_CSR_INT_BIT_RF_KILL;
4768 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
4769 			device_printf(sc->sc_dev,
4770 			    "%s: rfkill switch, disabling interface\n",
4772 			ifp->if_flags &= ~IFF_UP;
4773 			iwm_stop_locked(ifp);
4778 	 * The Linux driver uses periodic interrupts to avoid races.
4779 	 * We cargo-cult like it's going out of fashion.
4781 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
4782 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
4783 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
4784 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
4786 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
4790 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
4791 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
4792 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
4796 		/* enable periodic interrupt, see above */
4797 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
4798 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
4799 			    IWM_CSR_INT_PERIODIC_ENA);
4802 	if (__predict_false(r1 & ~handled))
4803 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4804 		    "%s: unhandled interrupts: %x\n", __func__, r1);
4808 	iwm_restore_interrupts(sc);
4815  * Autoconf glue-sniffing
/* PCI vendor/device IDs for the supported Intel 3160/7260/7265 parts. */
4817 #define PCI_VENDOR_INTEL		0x8086
4818 #define PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
4819 #define PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
4820 #define PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
4821 #define PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
4822 #define PCI_PRODUCT_INTEL_WL_7265_1	0x095a
4823 #define PCI_PRODUCT_INTEL_WL_7265_2	0x095b
/* Probe table consumed by iwm_probe(). */
4825 static const struct iwm_devices {
4829 	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4830 	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4831 	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4832 	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4833 	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4834 	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
/*
 * iwm_probe - device_probe method: match Intel vendor ID plus any device
 * ID in iwm_devices[], setting the description on success.
 */
4838 iwm_probe(device_t dev)
4842 	for (i = 0; i < nitems(iwm_devices); i++) {
4843 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4844 		    pci_get_device(dev) == iwm_devices[i].device) {
4845 			device_set_desc(dev, iwm_devices[i].name);
4846 			return (BUS_PROBE_DEFAULT);
/*
 * iwm_dev_check - per-chip configuration: select the firmware image name
 * and whether the host-interrupt-operation-mode workaround is needed
 * (3160/7260 yes, 7265 no), based on the PCI device ID.
 */
4854 iwm_dev_check(device_t dev)
4856 	struct iwm_softc *sc;
4858 	sc = device_get_softc(dev);
4860 	switch (pci_get_device(dev)) {
4861 	case PCI_PRODUCT_INTEL_WL_3160_1:
4862 	case PCI_PRODUCT_INTEL_WL_3160_2:
4863 		sc->sc_fwname = "iwm3160fw";
4864 		sc->host_interrupt_operation_mode = 1;
4866 	case PCI_PRODUCT_INTEL_WL_7260_1:
4867 	case PCI_PRODUCT_INTEL_WL_7260_2:
4868 		sc->sc_fwname = "iwm7260fw";
4869 		sc->host_interrupt_operation_mode = 1;
4871 	case PCI_PRODUCT_INTEL_WL_7265_1:
4872 	case PCI_PRODUCT_INTEL_WL_7265_2:
4873 		sc->sc_fwname = "iwm7265fw";
4874 		sc->host_interrupt_operation_mode = 0;
4877 		device_printf(dev, "unknown adapter type\n");
/*
 * iwm_pci_attach - PCI-level attach: clear the retry-timeout register,
 * enable bus mastering, map the memory BAR, allocate the (MSI if
 * available) interrupt and install iwm_intr as the handler.
 */
4883 iwm_pci_attach(device_t dev)
4885 	struct iwm_softc *sc;
4886 	int count, error, rid;
4888 #if defined(__DragonFly__)
4892 	sc = device_get_softc(dev);
4894 	/* Clear device-specific "PCI retry timeout" register (41h). */
4895 	reg = pci_read_config(dev, 0x40, sizeof(reg));
4896 	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4898 	/* Enable bus-mastering and hardware bug workaround. */
4899 	pci_enable_busmaster(dev);
4900 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4902 	if (reg & PCIM_STATUS_INTxSTATE) {
4903 		reg &= ~PCIM_STATUS_INTxSTATE;
4905 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4908 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4910 	if (sc->sc_mem == NULL) {
4911 		device_printf(sc->sc_dev, "can't map mem space\n");
4914 	sc->sc_st = rman_get_bustag(sc->sc_mem);
4915 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4917 	/* Install interrupt handler. */
4920 #if defined(__DragonFly__)
4921 	pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
4922 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
4924 	if (pci_alloc_msi(dev, &count) == 0)
4926 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4927 	    (rid != 0 ? 0 : RF_SHAREABLE));
4929 	if (sc->sc_irq == NULL) {
4930 		device_printf(dev, "can't map interrupt\n");
4933 #if defined(__DragonFly__)
/* DragonFly serializes wlan interrupts via the global wlan serializer. */
4934 	error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
4935 	    iwm_intr, sc, &sc->sc_ih,
4936 	    &wlan_global_serializer);
4938 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4939 	    NULL, iwm_intr, sc, &sc->sc_ih);
4941 	if (sc->sc_ih == NULL) {
4942 		device_printf(dev, "can't establish interrupt");
4943 #if defined(__DragonFly__)
4944 		pci_release_msi(dev);
4948 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
/*
 * iwm_pci_detach - release PCI resources acquired by iwm_pci_attach():
 * tear down the interrupt handler, release the IRQ (and MSI) and the
 * memory BAR.  NULL checks allow partial-attach cleanup.
 */
4954 iwm_pci_detach(device_t dev)
4956 	struct iwm_softc *sc = device_get_softc(dev);
4958 	if (sc->sc_irq != NULL) {
4959 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4960 		bus_release_resource(dev, SYS_RES_IRQ,
4961 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
4962 		pci_release_msi(dev);
4963 #if defined(__DragonFly__)
4967 	if (sc->sc_mem != NULL) {
4968 		bus_release_resource(dev, SYS_RES_MEMORY,
4969 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
4970 #if defined(__DragonFly__)
/*
 * iwm_attach - device_attach method.  Sets up locking, the taskqueue and
 * the end-of-scan task, does PCI attach, identifies the chip, allocates
 * all DMA resources (firmware, keep-warm, ICT, scheduler, TX/RX rings),
 * creates and configures the ifnet + ieee80211com, and registers an
 * intrhook (iwm_preinit) to finish bring-up once interrupts work.
 * The elided 'fail' path calls iwm_detach_local() to unwind.
 */
4979 iwm_attach(device_t dev)
4981 	struct iwm_softc *sc;
4982 	struct ieee80211com *ic;
4987 	sc = device_get_softc(dev);
4989 #if defined(__DragonFly__)
4990 	lockinit(&sc->sc_lk, "iwm_lk", 0, 0);
4991 	callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
4993 	mtx_init(&sc->sc_mtx, "iwm_mtx", MTX_DEF, 0);
4994 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
4996 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
4997 	sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
4998 	    taskqueue_thread_enqueue, &sc->sc_tq);
4999 #if defined(__DragonFly__)
5000 	error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
5003 	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5006 		device_printf(dev, "can't start threads, error %d\n",
5012 	error = iwm_pci_attach(dev);
/* -1 == no synchronous command response outstanding. */
5016 	sc->sc_wantresp = -1;
5018 	/* Check device type */
5019 	error = iwm_dev_check(dev);
5023 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5026 	 * We now start fiddling with the hardware
5028 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5029 	if (iwm_prepare_card_hw(sc) != 0) {
5030 		device_printf(dev, "could not initialize hardware\n");
5034 	/* Allocate DMA memory for firmware transfers. */
5035 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
5036 		device_printf(dev, "could not allocate memory for firmware\n");
5040 	/* Allocate "Keep Warm" page. */
5041 	if ((error = iwm_alloc_kw(sc)) != 0) {
5042 		device_printf(dev, "could not allocate keep warm page\n");
5046 	/* We use ICT interrupts */
5047 	if ((error = iwm_alloc_ict(sc)) != 0) {
5048 		device_printf(dev, "could not allocate ICT table\n");
5052 	/* Allocate TX scheduler "rings". */
5053 	if ((error = iwm_alloc_sched(sc)) != 0) {
5054 		device_printf(dev, "could not allocate TX scheduler rings\n");
5058 	/* Allocate TX rings */
5059 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5060 		if ((error = iwm_alloc_tx_ring(sc,
5061 		    &sc->txq[txq_i], txq_i)) != 0) {
5063 			    "could not allocate TX ring %d\n",
5069 	/* Allocate RX ring. */
5070 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5071 		device_printf(dev, "could not allocate RX ring\n");
5075 	/* Clear pending interrupts. */
5076 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5078 	sc->sc_ifp = ifp = if_alloc(IFT_IEEE80211);
5083 	if_initname(ifp, "iwm", device_get_unit(dev));
5084 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
5085 	ifp->if_init = iwm_init;
5086 	ifp->if_ioctl = iwm_ioctl;
5087 	ifp->if_start = iwm_start;
5088 #if defined(__DragonFly__)
5089 	ifp->if_nmbjclusters = IWM_RX_RING_COUNT;
5090 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
5092 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
5093 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
5094 	IFQ_SET_READY(&ifp->if_snd);
5098 	 * Set it here so we can initialise net80211.
5099 	 * But, if we fail before we call net80211_ifattach(),
5100 	 * we can't just call iwm_detach() or it'll free
5101 	 * net80211 without it having been setup.
5103 	sc->sc_ic = ic = ifp->if_l2com;
5105 #if defined(__DragonFly__)
5108 	ic->ic_name = device_get_nameunit(sc->sc_dev);
5110 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
5111 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
5113 	/* Set device capabilities. */
5116 	    IEEE80211_C_WPA |		/* WPA/RSN */
5118 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
5119 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
5120 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
5122 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5123 		sc->sc_phyctxt[i].id = i;
5124 		sc->sc_phyctxt[i].color = 0;
5125 		sc->sc_phyctxt[i].ref = 0;
5126 		sc->sc_phyctxt[i].channel = NULL;
5130 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
/* Defer firmware load etc. until interrupts are available at boot. */
5131 	sc->sc_preinit_hook.ich_func = iwm_preinit;
5132 	sc->sc_preinit_hook.ich_arg = sc;
5133 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5134 		device_printf(dev, "config_intrhook_establish failed\n");
5139 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5140 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5141 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5144 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5145 	    "<-%s\n", __func__);
5149 	/* Free allocated memory if something failed during attachment. */
5151 	iwm_detach_local(sc, 0);
/*
 * WME/EDCA parameter update callback installed on ic_wme.wme_update.
 * Currently only logs the call; no EDCA parameters are pushed to the
 * firmware here.
 * NOTE(review): the function head ("static int") and return line are
 * not visible in this excerpt.
 */
5157 iwm_update_edca(struct ieee80211com *ic)
5159 struct iwm_softc *sc = ic->ic_ifp->if_softc;
5161 device_printf(sc->sc_dev, "%s: called\n", __func__);
/*
 * Deferred-attach hook run via config_intrhook once interrupts are
 * available: starts the hardware, runs the "init" ucode to read
 * firmware/NVM information, then attaches net80211 and installs the
 * driver's ic_* callbacks.  On failure it disestablishes the hook and
 * tears down driver state with iwm_detach_local(sc, 0) (net80211 not
 * yet attached at that point).
 * NOTE(review): intermediate lines (error gotos, returns, braces) are
 * missing from this excerpt; comments below cover only visible code.
 */
5166 iwm_preinit(void *arg)
5168 struct iwm_softc *sc = arg;
5169 device_t dev = sc->sc_dev;
5170 struct ieee80211com *ic = sc->sc_ic;
5173 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5174 "->%s\n", __func__);
/* Bring the MAC up before talking to the firmware. */
5177 if ((error = iwm_start_hw(sc)) != 0) {
5178 device_printf(dev, "could not initialize hardware\n");
/*
 * Run the init firmware once (second arg 1 = "just initialize") to
 * obtain NVM/calibration data, then quiesce the device again.
 */
5183 error = iwm_run_init_mvm_ucode(sc, 1);
5184 iwm_stop_device(sc);
5190 "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
5191 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5192 IWM_UCODE_MAJOR(sc->sc_fwver),
5193 IWM_UCODE_MINOR(sc->sc_fwver),
5194 IWM_UCODE_API(sc->sc_fwver));
5196 /* not all hardware can do 5GHz band */
5197 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5198 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5199 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5203 * At this point we've committed - if we fail to do setup,
5204 * we now also have to tear down the net80211 state.
/* DragonFly requires the wlan serializer around ieee80211_ifattach(). */
5206 wlan_serialize_enter();
5207 ieee80211_ifattach(ic, sc->sc_bssid);
5208 wlan_serialize_exit();
/* Install the driver backends for net80211's operational callbacks. */
5209 ic->ic_vap_create = iwm_vap_create;
5210 ic->ic_vap_delete = iwm_vap_delete;
5211 ic->ic_raw_xmit = iwm_raw_xmit;
5212 ic->ic_node_alloc = iwm_node_alloc;
5213 ic->ic_scan_start = iwm_scan_start;
5214 ic->ic_scan_end = iwm_scan_end;
5215 ic->ic_update_mcast = iwm_update_mcast;
5216 ic->ic_set_channel = iwm_set_channel;
5217 ic->ic_scan_curchan = iwm_scan_curchan;
5218 ic->ic_scan_mindwell = iwm_scan_mindwell;
5219 ic->ic_wme.wme_update = iwm_update_edca;
5220 iwm_radiotap_attach(sc);
5222 ieee80211_announce(ic);
5224 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5225 "<-%s\n", __func__);
/* Success path: drop the intrhook; attach is complete. */
5226 config_intrhook_disestablish(&sc->sc_preinit_hook);
/* Failure path: drop the hook and free everything allocated so far. */
5230 config_intrhook_disestablish(&sc->sc_preinit_hook);
5231 iwm_detach_local(sc, 0);
5235 * Attach the interface to 802.11 radiotap.
/*
 * Register the driver's TX and RX radiotap header buffers with
 * net80211 so packet sniffers can receive radiotap-annotated frames.
 */
5238 iwm_radiotap_attach(struct iwm_softc *sc)
5240 struct ifnet *ifp = sc->sc_ifp;
5241 struct ieee80211com *ic = ifp->if_l2com;
5243 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5244 "->%s begin\n", __func__);
5245 ieee80211_radiotap_attach(ic,
5246 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5247 IWM_TX_RADIOTAP_PRESENT,
5248 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5249 IWM_RX_RADIOTAP_PRESENT);
5250 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5251 "->%s end\n", __func__);
/*
 * net80211 ic_vap_create backend: allocate and attach a single
 * ieee80211vap.  Only one VAP at a time is supported (enforced by the
 * TAILQ_EMPTY check on ic_vaps).  The driver interposes on the vap's
 * state machine by saving the stock iv_newstate pointer in
 * ivp->iv_newstate and substituting iwm_newstate.
 * NOTE(review): return statements and closing lines are missing from
 * this excerpt.
 */
5254 static struct ieee80211vap *
5255 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5256 enum ieee80211_opmode opmode, int flags,
5257 const uint8_t bssid[IEEE80211_ADDR_LEN],
5258 const uint8_t mac[IEEE80211_ADDR_LEN])
5260 struct iwm_vap *ivp;
5261 struct ieee80211vap *vap;
5262 uint8_t mac1[IEEE80211_ADDR_LEN];
5264 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
5266 IEEE80211_ADDR_COPY(mac1, mac);
/* DragonFly kmalloc: M_INTWAIT may sleep, M_ZERO zero-fills. */
5267 ivp = (struct iwm_vap *) kmalloc(sizeof(struct iwm_vap),
5268 M_80211_VAP, M_INTWAIT | M_ZERO);
5270 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
5271 IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
5272 vap->iv_bmissthreshold = 10; /* override default */
5273 /* Override with driver methods. */
5274 ivp->iv_newstate = vap->iv_newstate; /* keep original for chaining */
5275 vap->iv_newstate = iwm_newstate;
5277 ieee80211_ratectl_init(vap);
5278 /* Complete setup. */
5279 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status);
5280 ic->ic_opmode = opmode;
/*
 * net80211 ic_vap_delete backend: undo iwm_vap_create — deinit rate
 * control, detach the vap from net80211, then free the driver's vap
 * container.
 */
5286 iwm_vap_delete(struct ieee80211vap *vap)
5288 struct iwm_vap *ivp = IWM_VAP(vap);
5290 ieee80211_ratectl_deinit(vap);
5291 ieee80211_vap_detach(vap);
5292 kfree(ivp, M_80211_VAP);
/*
 * ic_scan_start callback: initiate a firmware-driven scan on the
 * 2 GHz band only.  If a scan is already in flight (sc_scanband set)
 * the request is skipped; if the firmware scan request fails, the
 * pending net80211 scan is cancelled under the wlan serializer.
 * NOTE(review): the early-return and error-branch lines are missing
 * from this excerpt.
 */
5296 iwm_scan_start(struct ieee80211com *ic)
5298 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5299 struct iwm_softc *sc = ic->ic_ifp->if_softc;
5302 if (sc->sc_scanband)
5305 error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
5307 device_printf(sc->sc_dev, "could not initiate scan\n");
5309 wlan_serialize_enter();
5310 ieee80211_cancel_scan(vap);
5311 wlan_serialize_exit();
/*
 * Remaining net80211 callbacks.  Their bodies are not visible in this
 * excerpt; in this driver the firmware manages scan dwell/channel
 * switching itself, so these are presumably no-op or minimal stubs —
 * TODO(review): confirm against the full source.
 */
5317 iwm_scan_end(struct ieee80211com *ic)
5322 iwm_update_mcast(struct ifnet *ifp)
5327 iwm_set_channel(struct ieee80211com *ic)
5332 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
5337 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
/*
 * (Re)initialization task, also used from resume: serializes on the
 * IWM_FLAG_BUSY softc flag (sleeping until any concurrent init/stop
 * finishes), stops the interface, restarts it if it was
 * administratively up and running, then clears BUSY and wakes other
 * waiters.
 * NOTE(review): the lock/unlock and iwm_init call lines are missing
 * from this excerpt.
 */
5343 iwm_init_task(void *arg1)
5345 struct iwm_softc *sc = arg1;
5346 struct ifnet *ifp = sc->sc_ifp;
/* Wait for any concurrent holder of IWM_FLAG_BUSY. */
5349 while (sc->sc_flags & IWM_FLAG_BUSY) {
5350 #if defined(__DragonFly__)
5351 iwmsleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
5353 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
5356 sc->sc_flags |= IWM_FLAG_BUSY;
5357 iwm_stop_locked(ifp);
/* DragonFly keeps IFF_RUNNING in if_flags; FreeBSD uses if_drv_flags. */
5358 #if defined(__DragonFly__)
5359 if ((ifp->if_flags & IFF_UP) &&
5360 (ifp->if_flags & IFF_RUNNING))
5362 if ((ifp->if_flags & IFF_UP) &&
5363 (ifp->if_drv_flags & IFF_DRV_RUNNING))
5366 sc->sc_flags &= ~IWM_FLAG_BUSY;
5367 wakeup(&sc->sc_flags);
/*
 * device_resume method: clear the PCI "retry timeout" register at
 * config offset 0x40 (some BIOSes set it, which breaks the device
 * after resume), then re-run the init task to bring the interface
 * back up.
 */
5372 iwm_resume(device_t dev)
5376 /* Clear device-specific "PCI retry timeout" register (41h). */
5377 reg = pci_read_config(dev, 0x40, sizeof(reg));
5378 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5379 iwm_init_task(device_get_softc(dev));
/*
 * device_suspend method: stop the interface if it is currently
 * running.  The running flag lives in if_flags on DragonFly and in
 * if_drv_flags on FreeBSD, hence the #ifdef.
 * NOTE(review): the actual stop call and return are missing from this
 * excerpt.
 */
5385 iwm_suspend(device_t dev)
5387 struct iwm_softc *sc = device_get_softc(dev);
5388 struct ifnet *ifp = sc->sc_ifp;
5390 #if defined(__DragonFly__)
5391 if (ifp->if_flags & IFF_RUNNING)
5393 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
/*
 * Common teardown shared by device_detach and failed-attach paths.
 * do_net80211 selects whether ieee80211_ifdetach() is called: the
 * attach/preinit failure paths pass 0 because net80211 may not have
 * been attached yet.  Releases (in order): taskqueue, watchdog
 * callout, device, net80211 state, TX rings, firmware image, DMA
 * areas, PCI resources, and finally the softc lock.
 * NOTE(review): several free calls and the return are missing from
 * this excerpt.
 */
5401 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
5403 struct ifnet *ifp = sc->sc_ifp;
5404 struct ieee80211com *ic;
5405 struct iwm_fw_info *fw = &sc->sc_fw;
5406 device_t dev = sc->sc_dev;
5410 #if defined(__DragonFly__)
5411 /* doesn't exist for DFly, DFly drains tasks on free */
5413 taskqueue_drain_all(sc->sc_tq);
5415 taskqueue_free(sc->sc_tq);
5416 #if defined(__DragonFly__)
5421 callout_drain(&sc->sc_watchdog_to);
5423 iwm_stop_device(sc);
5424 if (ic && do_net80211) {
5425 wlan_serialize_enter();
5426 ieee80211_ifdetach(ic);
5427 wlan_serialize_exit();
5430 #if defined(__DragonFly__)
5435 /* Free descriptor rings */
5436 for (i = 0; i < nitems(sc->txq); i++)
5437 iwm_free_tx_ring(sc, &sc->txq[i]);
/* Firmware image, if one was loaded. */
5440 if (fw->fw_rawdata != NULL)
5441 iwm_fw_info_free(fw);
5443 /* free scheduler */
/* DMA areas are freed only if they were actually allocated. */
5445 if (sc->ict_dma.vaddr != NULL)
5447 if (sc->kw_dma.vaddr != NULL)
5449 if (sc->fw_dma.vaddr != NULL)
5452 /* Finished with the hardware - detach things */
5453 iwm_pci_detach(dev);
5455 lockuninit(&sc->sc_lk);
/*
 * device_detach method: full teardown, including net80211 state
 * (do_net80211 = 1).
 * NOTE(review): the return of `error` is not visible in this excerpt.
 */
5461 iwm_detach(device_t dev)
5463 struct iwm_softc *sc = device_get_softc(dev);
5466 error = iwm_detach_local(sc, 1);
/*
 * Newbus glue: device method table, driver declaration, and module
 * registration for the iwm(4) PCI driver.  The module depends on
 * firmware(9) (for loading ucode images), the PCI bus, and wlan
 * (net80211).
 */
5471 static device_method_t iwm_pci_methods[] = {
5472 /* Device interface */
5473 DEVMETHOD(device_probe, iwm_probe),
5474 DEVMETHOD(device_attach, iwm_attach),
5475 DEVMETHOD(device_detach, iwm_detach),
5476 DEVMETHOD(device_suspend, iwm_suspend),
5477 DEVMETHOD(device_resume, iwm_resume),
5482 static driver_t iwm_pci_driver = {
5485 sizeof (struct iwm_softc)
5488 static devclass_t iwm_devclass;
5490 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
5491 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
5492 MODULE_DEPEND(iwm, pci, 1, 1, 1);
5493 MODULE_DEPEND(iwm, wlan, 1, 1, 1);