if_iwm - Sync nvm parsing code with Linux iwlwifi.
[dragonfly.git] / sys / dev / netif / iwm / if_iwm.c
/* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * The driver version we are currently based on is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *       changes to remove per-device network interface (DragonFly has not
 *       caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *    malloc -> kmalloc    (in particular, changing improper M_NOWAIT
 *                          specifications to M_INTWAIT.  We still don't
 *                          understand why FreeBSD uses M_NOWAIT for
 *                          critical must-not-fail kmalloc()s).
 *    free -> kfree
 *    printf -> kprintf
 *    (bug fix) memset in iwm_reset_rx_ring.
 *    (debug) added several kprintf()s on error
 *
 *    header file paths (DFly allows localized path specifications).
 *    minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *    (safety) added register read-back serialization in iwm_reset_rx_ring().
 *    packet counters
 *    msleep -> lksleep
 *    mtx -> lk  (mtx functions -> lockmgr functions)
 *    callout differences
 *    taskqueue differences
 *    MSI differences
 *    bus_setup_intr() differences
 *    minor PCI config register naming differences
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_debug.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"

const uint8_t iwm_nvm_channels[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64,
    100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
    /* 2.4 GHz */
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    /* 5 GHz */
    36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
    96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
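/*
 * Note: the .rate field is in the usual net80211 units of 500 kbps
 * (2 = 1 Mbps, ..., 108 = 54 Mbps); .plcp is the PLCP signal value the
 * firmware expects for that rate.  The first IWM_RIDX_OFDM entries are
 * the CCK rates, the remainder are OFDM.
 */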
const struct iwm_rate {
    uint8_t rate;
    uint8_t plcp;
} iwm_rates[] = {
    {   2, IWM_RATE_1M_PLCP },
    {   4, IWM_RATE_2M_PLCP },
    {  11, IWM_RATE_5M_PLCP },
    {  22, IWM_RATE_11M_PLCP },
    {  12, IWM_RATE_6M_PLCP },
    {  18, IWM_RATE_9M_PLCP },
    {  24, IWM_RATE_12M_PLCP },
    {  36, IWM_RATE_18M_PLCP },
    {  48, IWM_RATE_24M_PLCP },
    {  72, IWM_RATE_36M_PLCP },
    {  96, IWM_RATE_48M_PLCP },
    { 108, IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK		0
#define IWM_RIDX_OFDM		4
#define IWM_RIDX_MAX		(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
    uint16_t length;
    uint8_t *data;
};

static int  iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int  iwm_firmware_store_section(struct iwm_softc *,
        enum iwm_ucode_type, const uint8_t *, size_t);
static int  iwm_set_default_calib(struct iwm_softc *, const void *);
static void iwm_fw_info_free(struct iwm_fw_info *);
static int  iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int  iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
        bus_size_t, bus_size_t);
static void iwm_dma_contig_free(struct iwm_dma_info *);
static int  iwm_alloc_fwmem(struct iwm_softc *);
static int  iwm_alloc_sched(struct iwm_softc *);
static int  iwm_alloc_kw(struct iwm_softc *);
static int  iwm_alloc_ict(struct iwm_softc *);
static int  iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_disable_rx_dma(struct iwm_softc *);
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int  iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
        int);
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_enable_interrupts(struct iwm_softc *);
static void iwm_restore_interrupts(struct iwm_softc *);
static void iwm_disable_interrupts(struct iwm_softc *);
static void iwm_ict_reset(struct iwm_softc *);
static int  iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void iwm_stop_device(struct iwm_softc *);
static void iwm_mvm_nic_config(struct iwm_softc *);
static int  iwm_nic_rx_init(struct iwm_softc *);
static int  iwm_nic_tx_init(struct iwm_softc *);
static int  iwm_nic_init(struct iwm_softc *);
static int  iwm_enable_txq(struct iwm_softc *, int, int, int);
static int  iwm_post_alive(struct iwm_softc *);
static int  iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
        uint16_t, uint8_t *, uint16_t *);
static int  iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
        uint16_t *, uint32_t);
static uint32_t iwm_eeprom_channel_flags(uint16_t);
static void iwm_add_channel_band(struct iwm_softc *,
        struct ieee80211_channel[], int, int *, int, size_t,
        const uint8_t[]);
static void iwm_init_channel_map(struct ieee80211com *, int, int *,
        struct ieee80211_channel[]);
static struct iwm_nvm_data *
    iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
        const uint16_t *, const uint16_t *,
        const uint16_t *, const uint16_t *,
        const uint16_t *);
static void iwm_free_nvm_data(struct iwm_nvm_data *);
static void iwm_set_hw_address_family_8000(struct iwm_softc *,
        struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int  iwm_get_sku(const struct iwm_softc *, const uint16_t *,
        const uint16_t *);
static int  iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int  iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
        const uint16_t *);
static int  iwm_get_n_hw_addrs(const struct iwm_softc *,
        const uint16_t *);
static void iwm_set_radio_cfg(const struct iwm_softc *,
        struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
    iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int  iwm_nvm_init(struct iwm_softc *);
static int  iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
        const uint8_t *, uint32_t);
static int  iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
        const uint8_t *, uint32_t);
static int  iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int  iwm_load_cpu_sections_8000(struct iwm_softc *,
        struct iwm_fw_sects *, int, int *);
static int  iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int  iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int  iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int  iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int  iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int  iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
        enum iwm_ucode_type);
static int  iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int  iwm_rx_addbuf(struct iwm_softc *, int, int);
static int  iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int  iwm_mvm_get_signal_strength(struct iwm_softc *,
        struct iwm_rx_phy_info *);
static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
        struct iwm_rx_packet *, struct iwm_rx_data *);
static int  iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
        struct iwm_rx_data *);
static int  iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
        struct iwm_rx_packet *, struct iwm_node *);
static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
        struct iwm_rx_data *);
static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
        uint16_t);
#endif
static const struct iwm_rate *
    iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
        struct ieee80211_frame *, struct iwm_tx_cmd *);
static int  iwm_tx(struct iwm_softc *, struct mbuf *,
        struct ieee80211_node *, int);
static int  iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
        const struct ieee80211_bpf_params *);
static int  iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
        struct iwm_mvm_add_sta_cmd_v7 *, int *);
static int  iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
        int);
static int  iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int  iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int  iwm_mvm_add_int_sta_common(struct iwm_softc *,
        struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
static int  iwm_mvm_add_aux_sta(struct iwm_softc *);
static int  iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int  iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int  iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int  iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
    iwm_node_alloc(struct ieee80211vap *,
        const uint8_t[IEEE80211_ADDR_LEN]);
static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int  iwm_media_change(struct ifnet *);
static int  iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwm_endscan_cb(void *, int);
static void iwm_mvm_fill_sf_command(struct iwm_softc *,
        struct iwm_sf_cfg_cmd *, struct ieee80211_node *);
static int  iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int  iwm_send_bt_init_conf(struct iwm_softc *);
static int  iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int  iwm_init_hw(struct iwm_softc *);
static void iwm_init(struct iwm_softc *);
static void iwm_start(struct iwm_softc *);
static void iwm_stop(struct iwm_softc *);
static void iwm_watchdog(void *);
static void iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
    iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
static void iwm_nic_umac_error(struct iwm_softc *);
#endif
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_intr(void *);
static int  iwm_attach(device_t);
static int  iwm_is_valid_ether_addr(uint8_t *);
static void iwm_preinit(void *);
static int  iwm_detach_local(struct iwm_softc *sc, int);
static void iwm_init_task(void *);
static void iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
    iwm_vap_create(struct ieee80211com *,
        const char [IFNAMSIZ], int,
        enum ieee80211_opmode, int,
        const uint8_t [IEEE80211_ADDR_LEN],
        const uint8_t [IEEE80211_ADDR_LEN]);
static void iwm_vap_delete(struct ieee80211vap *);
static void iwm_scan_start(struct ieee80211com *);
static void iwm_scan_end(struct ieee80211com *);
static void iwm_update_mcast(struct ieee80211com *);
static void iwm_set_channel(struct ieee80211com *);
static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwm_scan_mindwell(struct ieee80211_scan_state *);
static int  iwm_detach(device_t);

#if defined(__DragonFly__)
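/*
 * Non-zero (the default) enables MSI; it can be turned off at boot via
 * the hw.iwm.msi.enable loader tunable registered below.
 */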
static int iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
    const struct iwm_fw_cscheme_list *l = (const void *)data;

    if (dlen < sizeof(*l) ||
        dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
        return EINVAL;

    /* we don't actually store anything for now, always use s/w crypto */

    return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
    struct iwm_fw_sects *fws;
    struct iwm_fw_onesect *fwone;

    if (type >= IWM_UCODE_TYPE_MAX)
        return EINVAL;
    if (dlen < sizeof(uint32_t))
        return EINVAL;

    fws = &sc->sc_fw.fw_sects[type];
    if (fws->fw_count >= IWM_UCODE_SECT_MAX)
        return EINVAL;

    fwone = &fws->fw_sect[fws->fw_count];

    /* first 32 bits are the device load offset */
    memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

    /* rest is data */
    fwone->fws_data = data + sizeof(uint32_t);
    fwone->fws_len = dlen - sizeof(uint32_t);

    fws->fw_count++;
    fws->fw_totlen += fwone->fws_len;

    return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

struct iwm_tlv_calib_data {
    uint32_t ucode_type;
    struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
    const struct iwm_tlv_calib_data *def_calib = data;
    uint32_t ucode_type = le32toh(def_calib->ucode_type);

    if (ucode_type >= IWM_UCODE_TYPE_MAX) {
        device_printf(sc->sc_dev,
            "Wrong ucode_type %u for default calibration.\n", ucode_type);
        return EINVAL;
    }

    sc->sc_default_calib[ucode_type].flow_trigger =
        def_calib->calib.flow_trigger;
    sc->sc_default_calib[ucode_type].event_trigger =
        def_calib->calib.event_trigger;

    return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
    firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
    fw->fw_fp = NULL;
    /* don't touch fw->fw_status */
    memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
    struct iwm_fw_info *fw = &sc->sc_fw;
    const struct iwm_tlv_ucode_header *uhdr;
    struct iwm_ucode_tlv tlv;
    enum iwm_ucode_tlv_type tlv_type;
    const struct firmware *fwp;
    const uint8_t *data;
    int error = 0;
    size_t len;

    if (fw->fw_status == IWM_FW_STATUS_DONE &&
        ucode_type != IWM_UCODE_TYPE_INIT)
        return 0;

    while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
#if defined(__DragonFly__)
        lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
#else
        msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
#endif
    }
    fw->fw_status = IWM_FW_STATUS_INPROGRESS;

    if (fw->fw_fp != NULL)
        iwm_fw_info_free(fw);

    /*
     * Load firmware into driver memory.
     * fw_fp will be set.
     */
    IWM_UNLOCK(sc);
    fwp = firmware_get(sc->sc_fwname);
    IWM_LOCK(sc);
    if (fwp == NULL) {
        error = ENOENT;
        device_printf(sc->sc_dev,
            "could not read firmware %s (error %d)\n",
            sc->sc_fwname, error);
        goto out;
    }
    fw->fw_fp = fwp;

    /* (Re-)Initialize default values. */
    sc->sc_capaflags = 0;
    sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
    memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
    memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

    /*
     * Parse firmware contents
     */
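
    /*
     * The ucode image is a TLV stream: a fixed header (whose first 32-bit
     * word must be zero, followed by the magic and version checked below),
     * then a sequence of { le32 type, le32 length, payload } records, each
     * padded out to a 4-byte boundary -- hence the roundup() at the bottom
     * of the parse loop.
     */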

    uhdr = (const void *)fw->fw_fp->data;
    if (*(const uint32_t *)fw->fw_fp->data != 0
        || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
        device_printf(sc->sc_dev, "invalid firmware %s\n",
            sc->sc_fwname);
        error = EINVAL;
        goto out;
    }

    ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
        IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
        IWM_UCODE_MINOR(le32toh(uhdr->ver)),
        IWM_UCODE_API(le32toh(uhdr->ver)));
    data = uhdr->data;
    len = fw->fw_fp->datasize - sizeof(*uhdr);

    while (len >= sizeof(tlv)) {
        size_t tlv_len;
        const void *tlv_data;

        memcpy(&tlv, data, sizeof(tlv));
        tlv_len = le32toh(tlv.length);
        tlv_type = le32toh(tlv.type);

        len -= sizeof(tlv);
        data += sizeof(tlv);
        tlv_data = data;

        if (len < tlv_len) {
            device_printf(sc->sc_dev,
                "firmware too short: %zu bytes\n", len);
            error = EINVAL;
            goto parse_out;
        }

        switch ((int)tlv_type) {
        case IWM_UCODE_TLV_PROBE_MAX_LEN:
            if (tlv_len < sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
                    __func__, (int)tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            sc->sc_capa_max_probe_len =
                le32toh(*(const uint32_t *)tlv_data);
            /* limit it to something sensible */
            if (sc->sc_capa_max_probe_len >
                IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
                IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
                    "ridiculous\n", __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PAN:
            if (tlv_len) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
                    __func__, (int)tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
            break;
        case IWM_UCODE_TLV_FLAGS:
            if (tlv_len < sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
                    __func__, (int)tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            /*
             * Apparently there can be many flags, but the Linux driver
             * parses only the first one, and so do we.
             *
             * XXX: why does this override IWM_UCODE_TLV_PAN?
             * Intentional or a bug?  Observations from
             * the current firmware file:
             *  1) TLV_PAN is parsed first
             *  2) TLV_FLAGS contains TLV_FLAGS_PAN
             * ==> this resets TLV_PAN to itself... hnnnk
             */
            sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
            break;
        case IWM_UCODE_TLV_CSCHEME:
            if ((error = iwm_store_cscheme(sc,
                tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_store_cscheme(): returned %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_NUM_OF_CPU: {
            uint32_t num_cpu;
            if (tlv_len != sizeof(uint32_t)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
                    __func__, (int)tlv_len);
                error = EINVAL;
                goto parse_out;
            }
            num_cpu = le32toh(*(const uint32_t *)tlv_data);
            if (num_cpu < 1 || num_cpu > 2) {
                device_printf(sc->sc_dev,
                    "%s: Driver supports only 1 or 2 CPUs\n",
                    __func__);
                error = EINVAL;
                goto parse_out;
            }
            break;
        }
        case IWM_UCODE_TLV_SEC_RT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_INIT:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_SEC_WOWLAN:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_DEF_CALIB:
            if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
                    __func__, (int)tlv_len,
                    (int)sizeof(struct iwm_tlv_calib_data));
                error = EINVAL;
                goto parse_out;
            }
            if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
                device_printf(sc->sc_dev,
                    "%s: iwm_set_default_calib() failed: %d\n",
                    __func__, error);
                goto parse_out;
            }
            break;
        case IWM_UCODE_TLV_PHY_SKU:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                device_printf(sc->sc_dev,
                    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
                    __func__, (int)tlv_len);
                goto parse_out;
            }
            sc->sc_fw_phy_config =
                le32toh(*(const uint32_t *)tlv_data);
            break;

        case IWM_UCODE_TLV_API_CHANGES_SET: {
            const struct iwm_ucode_api *api;
            if (tlv_len != sizeof(*api)) {
                error = EINVAL;
                goto parse_out;
            }
            api = (const struct iwm_ucode_api *)tlv_data;
            /* Flags may exceed 32 bits in future firmware. */
            if (le32toh(api->api_index) > 0) {
                device_printf(sc->sc_dev,
                    "unsupported API index %d\n",
                    le32toh(api->api_index));
                goto parse_out;
            }
            sc->sc_ucode_api = le32toh(api->api_flags);
            break;
        }

        case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
            const struct iwm_ucode_capa *capa;
            int idx, i;
            if (tlv_len != sizeof(*capa)) {
                error = EINVAL;
                goto parse_out;
            }
            capa = (const struct iwm_ucode_capa *)tlv_data;
            idx = le32toh(capa->api_index);
            if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
                device_printf(sc->sc_dev,
                    "unsupported API index %d\n", idx);
                goto parse_out;
            }
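            /*
             * Each capability TLV carries one 32-bit word of capability
             * bits; api_index selects which word of the sc_enabled_capa
             * bitmap the bits below are copied into.
             */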
            for (i = 0; i < 32; i++) {
                if ((le32toh(capa->api_capa) & (1U << i)) == 0)
                    continue;
                setbit(sc->sc_enabled_capa, i + (32 * idx));
            }
            break;
        }

        case 48: /* undocumented TLV */
        case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
        case IWM_UCODE_TLV_FW_GSCAN_CAPA:
            /* ignore, not used by current driver */
            break;

        case IWM_UCODE_TLV_SEC_RT_USNIFFER:
            if ((error = iwm_firmware_store_section(sc,
                IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
                tlv_len)) != 0)
                goto parse_out;
            break;

        case IWM_UCODE_TLV_N_SCAN_CHANNELS:
            if (tlv_len != sizeof(uint32_t)) {
                error = EINVAL;
                goto parse_out;
            }
            sc->sc_capa_n_scan_channels =
                le32toh(*(const uint32_t *)tlv_data);
            break;

        case IWM_UCODE_TLV_FW_VERSION:
            if (tlv_len != sizeof(uint32_t) * 3) {
                error = EINVAL;
                goto parse_out;
            }
            ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
                "%d.%d.%d",
                le32toh(((const uint32_t *)tlv_data)[0]),
                le32toh(((const uint32_t *)tlv_data)[1]),
                le32toh(((const uint32_t *)tlv_data)[2]));
            break;

        default:
            device_printf(sc->sc_dev,
                "%s: unknown firmware section %d, abort\n",
                __func__, tlv_type);
            error = EINVAL;
            goto parse_out;
        }

        len -= roundup(tlv_len, 4);
        data += roundup(tlv_len, 4);
    }

    KASSERT(error == 0, ("unhandled error"));

parse_out:
    if (error) {
        device_printf(sc->sc_dev, "firmware parse error %d, "
            "section type %d\n", error, tlv_type);
    }

    if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
        device_printf(sc->sc_dev,
            "device uses unsupported power ops\n");
        error = ENOTSUP;
    }

out:
    if (error) {
        fw->fw_status = IWM_FW_STATUS_NONE;
        if (fw->fw_fp != NULL)
            iwm_fw_info_free(fw);
    } else
        fw->fw_status = IWM_FW_STATUS_DONE;
    wakeup(&sc->sc_fw);

    return error;
}

/*
 * DMA resource routines
 */

#if !defined(__DragonFly__)
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    if (error != 0)
        return;
    KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
    *(bus_addr_t *)arg = segs[0].ds_addr;
}
#endif
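
/*
 * Allocate a physically contiguous, coherent DMA buffer below 4GB.
 * On DragonFly this is a single bus_dmamem_coherent() call; on FreeBSD
 * it is the usual tag-create/alloc/load triple, with iwm_dma_map_addr()
 * above capturing the single segment's bus address.
 */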
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
    int error;

    dma->tag = NULL;
    dma->map = NULL;
    dma->size = size;
    dma->vaddr = NULL;

#if defined(__DragonFly__)
    bus_dmamem_t dmem;
    error = bus_dmamem_coherent(tag, alignment, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        size, BUS_DMA_NOWAIT, &dmem);
    if (error != 0)
        goto fail;

    dma->tag = dmem.dmem_tag;
    dma->map = dmem.dmem_map;
    dma->vaddr = dmem.dmem_addr;
    dma->paddr = dmem.dmem_busaddr;
#else
    error = bus_dma_tag_create(tag, alignment,
        0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
        1, size, 0, NULL, NULL, &dma->tag);
    if (error != 0)
        goto fail;

    error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
    if (error != 0)
        goto fail;

    error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
        iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
    if (error != 0) {
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        dma->vaddr = NULL;
        goto fail;
    }
#endif

    bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

    return 0;

fail:
    iwm_dma_contig_free(dma);

    return error;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
    if (dma->vaddr != NULL) {
        bus_dmamap_sync(dma->tag, dma->map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        dma->vaddr = NULL;
    }
    if (dma->tag != NULL) {
        bus_dma_tag_destroy(dma->tag);
        dma->tag = NULL;
    }
}

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
    /* Must be aligned on a 16-byte boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
        sc->sc_fwdmasegsz, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
    /* TX scheduler rings must be aligned on a 1KB boundary. */
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
        nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
    return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
        IWM_ICT_SIZE, 1 << IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    bus_size_t size;
    int i, error;

    ring->cur = 0;

    /* Allocate RX descriptors (256-byte aligned). */
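    /*
     * Each descriptor is just a 32-bit word: iwm_rx_addbuf() (elsewhere
     * in this file) stores each RX buffer's bus address right-shifted
     * by 8 in it, which is why buffers must sit on 256-byte boundaries.
     */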
    size = IWM_RX_RING_COUNT * sizeof(uint32_t);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->desc_dma.vaddr;

    /* Allocate RX status area (16-byte aligned). */
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
        sizeof(*ring->stat), 16);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate RX status DMA memory\n");
        goto fail;
    }
    ring->stat = ring->stat_dma.vaddr;

    /* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
    error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
        0,
        BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR,
        NULL, NULL,
        IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
        BUS_DMA_NOWAIT, &ring->data_dmat);
#else
    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA tag, error %d\n",
            __func__, error);
        goto fail;
    }

    /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
    error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "%s: could not create RX buf DMA map, error %d\n",
            __func__, error);
        goto fail;
    }
    /*
     * Allocate and map RX buffers.
     */
    for (i = 0; i < IWM_RX_RING_COUNT; i++) {
        struct iwm_rx_data *data = &ring->data[i];
        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "%s: could not create RX buf DMA map, error %d\n",
                __func__, error);
            goto fail;
        }
        data->m = NULL;

        if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
            goto fail;
        }
    }
    return 0;

fail:
    iwm_free_rx_ring(sc, ring);
    return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
    /* XXX conditional nic locks are stupid */
    /* XXX print out if we can't lock the NIC? */
    if (iwm_nic_lock(sc)) {
        /* XXX handle if RX stop doesn't finish? */
        (void) iwm_pcie_rx_stop(sc);
        iwm_nic_unlock(sc);
    }
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    /* Reset the ring state */
    ring->cur = 0;

    /*
     * The hw rx ring index in shared memory must also be cleared,
     * otherwise the discrepancy can cause reprocessing chaos.
     */
    memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
    int i;

    iwm_dma_contig_free(&ring->desc_dma);
    iwm_dma_contig_free(&ring->stat_dma);

    for (i = 0; i < IWM_RX_RING_COUNT; i++) {
        struct iwm_rx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->spare_map != NULL) {
        bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
        ring->spare_map = NULL;
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
    bus_addr_t paddr;
    bus_size_t size;
    size_t maxsize;
    int nsegments;
    int i, error;

    ring->qid = qid;
    ring->queued = 0;
    ring->cur = 0;

    /* Allocate TX descriptors (256-byte aligned). */
    size = IWM_TX_RING_COUNT * sizeof(struct iwm_tfd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX ring DMA memory\n");
        goto fail;
    }
    ring->desc = ring->desc_dma.vaddr;

    /*
     * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
     * to allocate commands space for other rings.
     */
    if (qid > IWM_MVM_CMD_QUEUE)
        return 0;

    size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
    error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
    if (error != 0) {
        device_printf(sc->sc_dev,
            "could not allocate TX cmd DMA memory\n");
        goto fail;
    }
    ring->cmd = ring->cmd_dma.vaddr;

    /* FW commands may require more mapped space than packets. */
    if (qid == IWM_MVM_CMD_QUEUE) {
        maxsize = IWM_RBUF_SIZE;
        nsegments = 1;
    } else {
        maxsize = MCLBYTES;
        nsegments = IWM_MAX_SCATTER - 2;
    }

#if defined(__DragonFly__)
    error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
        0,
        BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR,
        NULL, NULL,
        maxsize, nsegments, maxsize,
        BUS_DMA_NOWAIT, &ring->data_dmat);
#else
    error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
        nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
    if (error != 0) {
        device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
        goto fail;
    }

    paddr = ring->cmd_dma.paddr;
    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        data->cmd_paddr = paddr;
        data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
            + offsetof(struct iwm_tx_cmd, scratch);
        paddr += sizeof(struct iwm_device_cmd);

        error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
        if (error != 0) {
            device_printf(sc->sc_dev,
                "could not create TX buf DMA map\n");
            goto fail;
        }
    }
    KASSERT(paddr == ring->cmd_dma.paddr + size,
        ("invalid physical address"));
    return 0;

fail:
    iwm_free_tx_ring(sc, ring);
    return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
    }
    /* Clear TX descriptors. */
    memset(ring->desc, 0, ring->desc_dma.size);
    bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
        BUS_DMASYNC_PREWRITE);
    sc->qfullmsk &= ~(1 << ring->qid);
    ring->queued = 0;
    ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
    int i;

    iwm_dma_contig_free(&ring->desc_dma);
    iwm_dma_contig_free(&ring->cmd_dma);

    for (i = 0; i < IWM_TX_RING_COUNT; i++) {
        struct iwm_tx_data *data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->data_dmat, data->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->data_dmat, data->map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->map != NULL) {
            bus_dmamap_destroy(ring->data_dmat, data->map);
            data->map = NULL;
        }
    }
    if (ring->data_dmat != NULL) {
        bus_dma_tag_destroy(ring->data_dmat);
        ring->data_dmat = NULL;
    }
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
    sc->sc_intmask = IWM_CSR_INI_SET_MASK;
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
    IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
    /* disable interrupts */
    IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

    /* acknowledge all interrupts */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

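/*
 * The ICT (interrupt cause table) is a 4KB DMA table into which the
 * device writes interrupt cause bits, letting the ISR read causes from
 * coherent memory instead of doing CSR reads across PCIe.  Reset clears
 * the table and re-points the device at it.
 */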
static void
iwm_ict_reset(struct iwm_softc *sc)
{
    iwm_disable_interrupts(sc);

    /* Reset ICT table. */
    memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
    sc->ict_cur = 0;

    /* Set physical address of ICT table (4KB aligned). */
    IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
        IWM_CSR_DRAM_INT_TBL_ENABLE
        | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
        | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
        | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

    /* Switch to ICT interrupt mode in driver. */
    sc->sc_flags |= IWM_FLAG_USE_ICT;

    /* Re-enable interrupts. */
    IWM_WRITE(sc, IWM_CSR_INT, ~0);
    iwm_enable_interrupts(sc);
}

/*
 * Since this ... hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
    struct ieee80211com *ic = &sc->sc_ic;
    struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
    int chnl, qid;
    uint32_t mask = 0;

    /* tell the device to stop sending interrupts */
    iwm_disable_interrupts(sc);

    /*
     * FreeBSD-local: mark the first vap as not-uploaded,
     * so the next transition through auth/assoc
     * will correctly populate the MAC context.
     */
    if (vap) {
        struct iwm_vap *iv = IWM_VAP(vap);
        iv->is_uploaded = 0;
    }

    /* device going down, stop using ICT table */
    sc->sc_flags &= ~IWM_FLAG_USE_ICT;

    /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

    iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

    if (iwm_nic_lock(sc)) {
        /* Stop each Tx DMA channel */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
            IWM_WRITE(sc,
                IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
            mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
        }

        /* Wait for DMA channels to be idle */
        if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
            5000)) {
            device_printf(sc->sc_dev,
                "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
        }
        iwm_nic_unlock(sc);
    }
    iwm_disable_rx_dma(sc);

    /* Stop RX ring. */
    iwm_reset_rx_ring(sc, &sc->rxq);

    /* Reset all TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++)
        iwm_reset_tx_ring(sc, &sc->txq[qid]);

    /*
     * Power-down device's busmaster DMA clocks
     */
    iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
    DELAY(5);

    /* Make sure (redundant) we've released our request to stay awake */
    IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
        IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

    /* Stop the device, and put it in low power state */
    iwm_apm_stop(sc);

    /* stop and reset the on-board processor */
    IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
    DELAY(1000);

    /*
     * Upon stop, the APM issues an interrupt if HW RF kill is set.
     * This is a bug in certain versions of the hardware.
     * Certain devices also keep sending HW RF kill interrupt all
     * the time, unless the interrupt is ACKed even if the interrupt
     * should be masked.  Re-ACK all the interrupts here.
     */
    iwm_disable_interrupts(sc);

    /*
     * Even if we stop the HW, we still want the RF kill
     * interrupt.
     */
    iwm_enable_rfkill_int(sc);
    iwm_check_rfkill(sc);
}

static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
    uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
    uint32_t reg_val = 0;

    radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
        IWM_FW_PHY_CFG_RADIO_TYPE_POS;
    radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
        IWM_FW_PHY_CFG_RADIO_STEP_POS;
    radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
        IWM_FW_PHY_CFG_RADIO_DASH_POS;

    /* SKU control */
    reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
    reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
        IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

    /* radio configuration */
    reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
    reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
    reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

    IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
        radio_cfg_step, radio_cfg_dash);

    /*
     * W/A : NIC is stuck in a reset state after Early PCIe power off
     * (PCIe power is lost before PERST# is asserted), causing ME FW
     * to lose ownership and not being able to obtain it back.
     */
    if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
            ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
    }
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
    if (!iwm_nic_lock(sc))
        return EBUSY;

    /*
     * Initialize RX ring.  This is from the iwn driver.
     */
    memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

    /* stop DMA */
    iwm_disable_rx_dma(sc);
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

    /* Set physical address of RX ring (256-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

    /* Set physical address of RX status (16-byte aligned). */
    IWM_WRITE(sc,
        IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
    /* Force serialization (probably not needed but don't trust the HW) */
    IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

    /* Enable RX. */
    IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
        IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
        IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
        IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
        IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
        (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
        IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
        IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

    IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

    /* W/A for interrupt coalescing bug in 7260 and 3160 */
    if (sc->host_interrupt_operation_mode)
        IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

    /*
     * Thus sayeth el jefe (iwlwifi) via a comment:
     *
     * This value should initially be 0 (before preparing any
     * RBs), should be 8 after preparing the first 8 RBs (for example)
     */
    IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

    iwm_nic_unlock(sc);

    return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
    int qid;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    /* Deactivate TX scheduler. */
    iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

    /* Set physical address of "keep warm" page (16-byte aligned). */
    IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

    /* Initialize TX rings. */
    for (qid = 0; qid < nitems(sc->txq); qid++) {
        struct iwm_tx_ring *txq = &sc->txq[qid];

        /* Set physical address of TX ring (256-byte aligned). */
        IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
            txq->desc_dma.paddr >> 8);
        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "%s: loading ring %d descriptors (%p) at %lx\n",
            __func__,
            qid, txq->desc,
            (unsigned long) (txq->desc_dma.paddr >> 8));
    }

    iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

    iwm_nic_unlock(sc);

    return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
    int error;

    iwm_apm_init(sc);
    if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
        iwm_set_pwr(sc);

    iwm_mvm_nic_config(sc);

    if ((error = iwm_nic_rx_init(sc)) != 0)
        return error;

    /*
     * Ditto for TX, from iwn
     */
    if ((error = iwm_nic_tx_init(sc)) != 0)
        return error;

    IWM_DPRINTF(sc, IWM_DEBUG_RESET,
        "%s: shadow registers enabled\n", __func__);
    IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

    return 0;
}

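/*
 * Per-AC TX FIFO assignment, indexed in the same access-category order
 * as Linux iwlwifi uses (VO, VI, BE, BK).
 */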
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
    IWM_MVM_TX_FIFO_VO,
    IWM_MVM_TX_FIFO_VI,
    IWM_MVM_TX_FIFO_BE,
    IWM_MVM_TX_FIFO_BK,
};

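/*
 * Enable a TX queue.  The command queue is configured directly through
 * scheduler PRPH registers and SRAM; regular queues are instead set up
 * by the firmware via an IWM_SCD_QUEUE_CFG host command.
 */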
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
    if (!iwm_nic_lock(sc)) {
        device_printf(sc->sc_dev,
            "%s: cannot enable txq %d\n", __func__, qid);
        return EBUSY;
    }

    IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

    if (qid == IWM_MVM_CMD_QUEUE) {
        /* deactivate before configuration */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
            | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

        iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

        iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

        iwm_write_mem32(sc,
            sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
        /* Set scheduler window size and frame limit. */
        iwm_write_mem32(sc,
            sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
            sizeof(uint32_t),
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
            IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
            IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
            IWM_SCD_QUEUE_STTS_REG_MSK);
    } else {
        struct iwm_scd_txq_cfg_cmd cmd;
        int error;

        iwm_nic_unlock(sc);

        memset(&cmd, 0, sizeof(cmd));
        cmd.scd_queue = qid;
        cmd.enable = 1;
        cmd.sta_id = sta_id;
        cmd.tx_fifo = fifo;
        cmd.aggregate = 0;
        cmd.window = IWM_FRAME_LIMIT;

        error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
            sizeof(cmd), &cmd);
        if (error) {
            device_printf(sc->sc_dev,
                "cannot enable txq %d\n", qid);
            return error;
        }

        if (!iwm_nic_lock(sc))
            return EBUSY;
    }

    iwm_write_prph(sc, IWM_SCD_EN_CTRL,
        iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

    iwm_nic_unlock(sc);

    IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
        __func__, qid, fifo);

    return 0;
}

static int
iwm_post_alive(struct iwm_softc *sc)
{
    int nwords;
    int error, chnl;
    uint32_t base;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
    if (sc->sched_base != base) {
        device_printf(sc->sc_dev,
            "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
            __func__, sc->sched_base, base);
    }

    iwm_ict_reset(sc);

    /* Clear TX scheduler state in SRAM. */
    nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
        IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
        / sizeof(uint32_t);
    error = iwm_write_mem(sc,
        sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
        NULL, nwords);
    if (error)
        goto out;

    /* Set physical address of TX scheduler rings (1KB aligned). */
    iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

    iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

    iwm_nic_unlock(sc);

    /* enable command channel */
    error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
    if (error)
        return error;

    if (!iwm_nic_lock(sc))
        return EBUSY;

    iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

    /* Enable DMA channels. */
    for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
    }

    IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
        IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

    /* Enable L1-Active */
    if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
        iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
            IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
    }

out:
    iwm_nic_unlock(sc);
    return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

#define IWM_NVM_HW_SECTION_NUM_FAMILY_7000	0
#define IWM_NVM_HW_SECTION_NUM_FAMILY_8000	10

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE	1
#define IWM_NVM_READ_OPCODE	0

/* load nvm chunk response */
enum {
    IWM_READ_NVM_CHUNK_SUCCEED = 0,
    IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
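
/*
 * NVM contents are fetched via IWM_NVM_ACCESS_CMD host commands, one
 * IWM_NVM_DEFAULT_CHUNK_SIZE (2K) chunk at a time, until the firmware
 * returns a short read; iwm_nvm_read_section() below drives that loop
 * around the single-chunk helper that follows.
 */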
1743
1744static int
1745iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1746 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1747{
1748 struct iwm_nvm_access_cmd nvm_access_cmd = {
1749 .offset = htole16(offset),
1750 .length = htole16(length),
1751 .type = htole16(section),
1752 .op_code = IWM_NVM_READ_OPCODE,
1753 };
1754 struct iwm_nvm_access_resp *nvm_resp;
1755 struct iwm_rx_packet *pkt;
1756 struct iwm_host_cmd cmd = {
1757 .id = IWM_NVM_ACCESS_CMD,
1758 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1759 .data = { &nvm_access_cmd, },
1760 };
1761 int ret, bytes_read, offset_read;
1762 uint8_t *resp_data;
1763
1764 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1765
1766 ret = iwm_send_cmd(sc, &cmd);
1767 if (ret) {
1768 device_printf(sc->sc_dev,
1769 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1770 return ret;
1771 }
1772
1773 pkt = cmd.resp_pkt;
1774
1775 /* Extract NVM response */
1776 nvm_resp = (void *)pkt->data;
1777 ret = le16toh(nvm_resp->status);
1778 bytes_read = le16toh(nvm_resp->length);
1779 offset_read = le16toh(nvm_resp->offset);
1780 resp_data = nvm_resp->data;
1781 if (ret) {
1782 if ((offset != 0) &&
1783 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1784 /*
1785 * meaning of NOT_VALID_ADDRESS:
1786 * driver try to read chunk from address that is
1787 * multiple of 2K and got an error since addr is empty.
1788 * meaning of (offset != 0): driver already
1789 * read valid data from another chunk so this case
1790 * is not an error.
1791 */
1792 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1793 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1794 offset);
1795 *len = 0;
1796 ret = 0;
1797 } else {
1798 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1799 "NVM access command failed with status %d\n", ret);
1800 ret = EIO;
1801 }
1802 goto exit;
1803 }
1804
1805 if (offset_read != offset) {
1806 device_printf(sc->sc_dev,
1807 "NVM ACCESS response with invalid offset %d\n",
1808 offset_read);
1809 ret = EINVAL;
1810 goto exit;
1811 }
1812
1813 if (bytes_read > length) {
1814 device_printf(sc->sc_dev,
1815 "NVM ACCESS response with too much data "
1816 "(%d bytes requested, %d bytes received)\n",
1817 length, bytes_read);
1818 ret = EINVAL;
1819 goto exit;
1820 }
1821
1822 /* Write data to NVM */
1823 memcpy(data + offset, resp_data, bytes_read);
1824 *len = bytes_read;
1825
1826 exit:
1827 iwm_free_resp(sc, &cmd);
1828 return ret;
1829}
1830
1831/*
1832 * Reads an NVM section completely.
1833 * NICs prior to the 7000 family don't have a real NVM, but just read
1834 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
1835 * by the uCode, we need to manually check in this case that we don't
1836 * overflow and try to read more than the EEPROM size.
1837 * For 7000 family NICs, we supply the maximal size we can read, and
1838 * the uCode fills the response with as much data as it can,
1839 * without overflowing, so no check is needed.
1840 */
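/*
 * Illustration of the read loop below, with hypothetical sizes: a
 * 5000-byte section comes back as chunks of 2048, 2048 and 904 bytes;
 * the final short read (seglen 904 < length 2048) ends the loop and
 * *len is set to the 5000 bytes accumulated in offset.
 */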
1841static int
1842iwm_nvm_read_section(struct iwm_softc *sc,
1843 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1844{
1845 uint16_t seglen, length, offset = 0;
1846 int ret;
1847
1848 /* Set nvm section read length */
1849 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1850
1851 seglen = length;
1852
1853 /* Read the NVM until exhausted (reading less than requested) */
1854 while (seglen == length) {
1855 /* Check no memory assumptions fail and cause an overflow */
1856 if ((size_read + offset + length) >
1857 sc->eeprom_size) {
1858 device_printf(sc->sc_dev,
1859 "EEPROM size is too small for NVM\n");
1860 return ENOBUFS;
1861 }
1862
1863 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1864 if (ret) {
1865 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1866 "Cannot read NVM from section %d offset %d, length %d\n",
1867 section, offset, length);
1868 return ret;
1869 }
1870 offset += seglen;
1871 }
1872
1873 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1874 "NVM section %d read completed\n", section);
1875 *len = offset;
1876 return 0;
1877}
1878
1879/* NVM offsets (in words) definitions */
1880enum iwm_nvm_offsets {
1881 /* NVM HW-Section offset (in words) definitions */
1882 IWM_HW_ADDR = 0x15,
1883
1884/* NVM SW-Section offset (in words) definitions */
1885 IWM_NVM_SW_SECTION = 0x1C0,
1886 IWM_NVM_VERSION = 0,
1887 IWM_RADIO_CFG = 1,
1888 IWM_SKU = 2,
1889 IWM_N_HW_ADDRS = 3,
1890 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1891
1892/* NVM calibration section offset (in words) definitions */
1893 IWM_NVM_CALIB_SECTION = 0x2B8,
1894 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1895};
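/*
 * A reading aid (not from the original sources): the values above are
 * word indices relative to the start of their NVM section, so e.g. the
 * channel flags start IWM_NVM_CHANNELS (0x1E0 - 0x1C0 = 0x20) words
 * into the SW section and are read as nvm_sw[IWM_NVM_CHANNELS + ch_idx].
 */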
1896
1897enum iwm_8000_nvm_offsets {
1898 /* NVM HW-Section offset (in words) definitions */
1899 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1900 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1901 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1902 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1903 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1904
1905 /* NVM SW-Section offset (in words) definitions */
1906 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1907 IWM_NVM_VERSION_8000 = 0,
1908 IWM_RADIO_CFG_8000 = 0,
1909 IWM_SKU_8000 = 2,
1910 IWM_N_HW_ADDRS_8000 = 3,
1911
1912 /* NVM REGULATORY -Section offset (in words) definitions */
1913 IWM_NVM_CHANNELS_8000 = 0,
1914 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1915 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1916 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1917
1918 /* NVM calibration section offset (in words) definitions */
1919 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1920 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1921};
1922
1923/* SKU Capabilities (actual values from NVM definition) */
1924enum nvm_sku_bits {
1925 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1926 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1927 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1928 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1929};
1930
1931/* radio config bits (actual values from NVM definition) */
1932#define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1933#define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1934#define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1935#define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1936#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1937#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1938
1939#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1940#define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1941#define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1942#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1943#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1944#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
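/*
 * Worked example for the 7000-family masks above, using a hypothetical
 * radio_cfg value of 0x0648: dash = 0x648 & 0x3 = 0, step = (0x648 >> 2)
 * & 0x3 = 2, type = (0x648 >> 4) & 0x3 = 0, pnum = (0x648 >> 6) & 0x3 = 1,
 * tx_ant = (0x648 >> 8) & 0xF = 6, rx_ant = (0x648 >> 12) & 0xF = 0.
 */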
1945
1946#define DEFAULT_MAX_TX_POWER 16
1947
1948/**
1949 * enum iwm_nvm_channel_flags - channel flags in NVM
1950 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1951 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1952 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1953 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1954 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1955 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1956 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1957 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1958 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1959 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1960 */
1961enum iwm_nvm_channel_flags {
1962 IWM_NVM_CHANNEL_VALID = (1 << 0),
1963 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1964 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1965 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1966 IWM_NVM_CHANNEL_DFS = (1 << 7),
1967 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1968 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1969 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1970 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1971};
1972
1973/* lower blocks contain EEPROM image and calibration data */
1974#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(uint16_t)) /* 16 KB */
1975#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(uint16_t)) /* 32 KB */
1976
1977/*
1978 * Translate EEPROM flags to net80211.
1979 */
1980static uint32_t
1981iwm_eeprom_channel_flags(uint16_t ch_flags)
1982{
1983 uint32_t nflags;
1984
1985 nflags = 0;
1986 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1987 nflags |= IEEE80211_CHAN_PASSIVE;
1988 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1989 nflags |= IEEE80211_CHAN_NOADHOC;
1990 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1991 nflags |= IEEE80211_CHAN_DFS;
1992 /* Just in case. */
1993 nflags |= IEEE80211_CHAN_NOADHOC;
1994 }
1995
1996 return (nflags);
1997}
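/*
 * Example with hypothetical flags: a channel with IWM_NVM_CHANNEL_VALID,
 * IWM_NVM_CHANNEL_IBSS and IWM_NVM_CHANNEL_RADAR set but
 * IWM_NVM_CHANNEL_ACTIVE clear maps to IEEE80211_CHAN_PASSIVE |
 * IEEE80211_CHAN_DFS | IEEE80211_CHAN_NOADHOC.
 */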
1998
1999static void
2000iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2001 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2002 const uint8_t bands[])
2003{
2004 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2005 uint32_t nflags;
2006 uint16_t ch_flags;
2007 uint8_t ieee;
2008 int error;
2009
2010 for (; ch_idx < ch_num; ch_idx++) {
2011 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2012 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2013 ieee = iwm_nvm_channels[ch_idx];
2014 else
2015 ieee = iwm_nvm_channels_8000[ch_idx];
2016
2017 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2018 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2019 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2020 ieee, ch_flags,
2021 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2022 "5.2" : "2.4");
2023 continue;
2024 }
2025
2026 nflags = iwm_eeprom_channel_flags(ch_flags);
2027 error = ieee80211_add_channel(chans, maxchans, nchans,
2028 ieee, 0, 0, nflags, bands);
2029 if (error != 0)
2030 break;
2031
2032 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2033 "Ch. %d Flags %x [%sGHz] - Added\n",
2034 ieee, ch_flags,
2035 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2036 "5.2" : "2.4");
2037 }
2038}
2039
2040static void
2041iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2042 struct ieee80211_channel chans[])
2043{
2044 struct iwm_softc *sc = ic->ic_softc;
2045 struct iwm_nvm_data *data = sc->nvm_data;
2046 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
2047 size_t ch_num;
2048
2049 memset(bands, 0, sizeof(bands));
2050 /* 1-13: 11b/g channels. */
2051 setbit(bands, IEEE80211_MODE_11B);
2052 setbit(bands, IEEE80211_MODE_11G);
2053 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2054 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2055
2056 /* 14: 11b channel only. */
2057 clrbit(bands, IEEE80211_MODE_11G);
2058 iwm_add_channel_band(sc, chans, maxchans, nchans,
2059 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2060
2061 if (data->sku_cap_band_52GHz_enable) {
2062 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2063 ch_num = nitems(iwm_nvm_channels);
2064 else
2065 ch_num = nitems(iwm_nvm_channels_8000);
2066 memset(bands, 0, sizeof(bands));
2067 setbit(bands, IEEE80211_MODE_11A);
2068 iwm_add_channel_band(sc, chans, maxchans, nchans,
2069 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2070 }
2071}
2072
2073static void
2074iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2075 const uint16_t *mac_override, const uint16_t *nvm_hw)
2076{
2077 const uint8_t *hw_addr;
2078
2079 if (mac_override) {
2080 static const uint8_t reserved_mac[] = {
2081 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2082 };
2083
2084 hw_addr = (const uint8_t *)(mac_override +
2085 IWM_MAC_ADDRESS_OVERRIDE_8000);
2086
2087		/*
2088		 * Store the MAC address from the MAO section.
2089		 * No byte swapping is required in the MAO section.
2090		 */
2091 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2092
2093 /*
2094 * Force the use of the OTP MAC address in case of reserved MAC
2095 * address in the NVM, or if address is given but invalid.
2096 */
2097 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2098 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2099 iwm_is_valid_ether_addr(data->hw_addr) &&
2100 !IEEE80211_IS_MULTICAST(data->hw_addr))
2101 return;
2102
2103 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2104 "%s: mac address from nvm override section invalid\n",
2105 __func__);
2106 }
2107
2108 if (nvm_hw) {
2109 /* read the mac address from WFMP registers */
2110 uint32_t mac_addr0 =
2111 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2112 uint32_t mac_addr1 =
2113 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2114
2115 hw_addr = (const uint8_t *)&mac_addr0;
2116 data->hw_addr[0] = hw_addr[3];
2117 data->hw_addr[1] = hw_addr[2];
2118 data->hw_addr[2] = hw_addr[1];
2119 data->hw_addr[3] = hw_addr[0];
2120
2121 hw_addr = (const uint8_t *)&mac_addr1;
2122 data->hw_addr[4] = hw_addr[1];
2123 data->hw_addr[5] = hw_addr[0];
2124
2125 return;
2126 }
2127
2128 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2129 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2130}
2131
2132static int
2133iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2134 const uint16_t *phy_sku)
2135{
2136 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2137 return le16_to_cpup(nvm_sw + IWM_SKU);
2138
2139 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2140}
2141
2142static int
2143iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2144{
2145 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2146 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2147 else
2148 return le32_to_cpup((const uint32_t *)(nvm_sw +
2149 IWM_NVM_VERSION_8000));
2150}
2151
2152static int
2153iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2154 const uint16_t *phy_sku)
2155{
2156 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2157 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2158
2159 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2160}
2161
2162static int
2163iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2164{
2165 int n_hw_addr;
2166
2167 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2168 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2169
2170 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2171
2172 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2173}
2174
2175static void
2176iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2177 uint32_t radio_cfg)
2178{
2179 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2180 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2181 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2182 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2183 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2184 return;
2185 }
2186
2187 /* set the radio configuration for family 8000 */
2188 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2189 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2190 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2191 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2192 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2193 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2194}
2195
2196static int
2197iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2198 const uint16_t *nvm_hw, const uint16_t *mac_override)
2199{
2200#ifdef notyet /* for FAMILY 9000 */
2201 if (cfg->mac_addr_from_csr) {
2202 iwm_set_hw_address_from_csr(sc, data);
2203 } else
2204#endif
2205 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2206 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2207
2208 /* The byte order is little endian 16 bit, meaning 214365 */
2209 data->hw_addr[0] = hw_addr[1];
2210 data->hw_addr[1] = hw_addr[0];
2211 data->hw_addr[2] = hw_addr[3];
2212 data->hw_addr[3] = hw_addr[2];
2213 data->hw_addr[4] = hw_addr[5];
2214 data->hw_addr[5] = hw_addr[4];
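		/*
		 * Worked example with hypothetical NVM bytes: aa bb cc dd
		 * ee ff yields the MAC address bb:aa:dd:cc:ff:ee.
		 */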
2215 } else {
2216 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2217 }
2218
2219 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2220 device_printf(sc->sc_dev, "no valid mac address was found\n");
2221 return EINVAL;
2222 }
2223
2224 return 0;
2225}
2226
2227static struct iwm_nvm_data *
2228iwm_parse_nvm_data(struct iwm_softc *sc,
2229 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2230 const uint16_t *nvm_calib, const uint16_t *mac_override,
2231 const uint16_t *phy_sku, const uint16_t *regulatory)
2232{
2233 struct iwm_nvm_data *data;
2234 uint32_t sku, radio_cfg;
2235
2236 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2237 data = kmalloc(sizeof(*data) +
2238 IWM_NUM_CHANNELS * sizeof(uint16_t),
2239 M_DEVBUF, M_WAITOK | M_ZERO);
2240 } else {
2241 data = kmalloc(sizeof(*data) +
2242 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2243 M_DEVBUF, M_WAITOK | M_ZERO);
2244 }
2245 if (!data)
2246 return NULL;
2247
2248 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2249
2250 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2251 iwm_set_radio_cfg(sc, data, radio_cfg);
2252
2253 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2254 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2255 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2256 data->sku_cap_11n_enable = 0;
2257
2258 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2259
2260 /* If no valid mac address was found - bail out */
2261 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2262 kfree(data, M_DEVBUF);
2263 return NULL;
2264 }
2265
2266 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2267 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2268 IWM_NUM_CHANNELS * sizeof(uint16_t));
2269 } else {
2270 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2271 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2272 }
2273
2274 return data;
2275}
2276
2277static void
2278iwm_free_nvm_data(struct iwm_nvm_data *data)
2279{
2280 if (data != NULL)
2281 kfree(data, M_DEVBUF);
2282}
2283
2284static struct iwm_nvm_data *
2285iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2286{
2287 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2288
2289 /* Checking for required sections */
2290 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2291 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2292 !sections[sc->nvm_hw_section_num].data) {
2293 device_printf(sc->sc_dev,
2294 "Can't parse empty OTP/NVM sections\n");
2295 return NULL;
2296 }
2297 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2298 /* SW and REGULATORY sections are mandatory */
2299 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2300 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2301 device_printf(sc->sc_dev,
2302 "Can't parse empty OTP/NVM sections\n");
2303 return NULL;
2304 }
2305 /* MAC_OVERRIDE or at least HW section must exist */
2306 if (!sections[sc->nvm_hw_section_num].data &&
2307 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2308 device_printf(sc->sc_dev,
2309 "Can't parse mac_address, empty sections\n");
2310 return NULL;
2311 }
2312
2313 /* PHY_SKU section is mandatory in B0 */
2314 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2315 device_printf(sc->sc_dev,
2316 "Can't parse phy_sku in B0, empty sections\n");
2317 return NULL;
2318 }
2319 } else {
2320 panic("unknown device family %d\n", sc->sc_device_family);
2321 }
2322
2323 hw = (const uint16_t *) sections[sc->nvm_hw_section_num].data;
2324 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2325 calib = (const uint16_t *)
2326 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2327 regulatory = (const uint16_t *)
2328 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2329 mac_override = (const uint16_t *)
2330 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2331 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2332
2333 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2334 phy_sku, regulatory);
2335}
2336
2337static int
2338iwm_nvm_init(struct iwm_softc *sc)
2339{
2340 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2341 int i, ret, section;
2342 uint32_t size_read = 0;
2343 uint8_t *nvm_buffer, *temp;
2344 uint16_t len;
2345
2346 memset(nvm_sections, 0, sizeof(nvm_sections));
2347
2348 if (sc->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2349 return EINVAL;
2350
2351 /* load NVM values from nic */
2352 /* Read From FW NVM */
2353 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2354
2355 nvm_buffer = kmalloc(sc->eeprom_size, M_DEVBUF, M_INTWAIT | M_ZERO);
2356 if (!nvm_buffer)
2357 return ENOMEM;
2358 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2359		/* Try to read each section; sections that fail to read are skipped. */
2360 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2361 &len, size_read);
2362 if (ret)
2363 continue;
2364 size_read += len;
2365 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2366 if (!temp) {
2367 ret = ENOMEM;
2368 break;
2369 }
2370 memcpy(temp, nvm_buffer, len);
2371
2372 nvm_sections[section].data = temp;
2373 nvm_sections[section].length = len;
2374 }
2375 if (!size_read)
2376 device_printf(sc->sc_dev, "OTP is blank\n");
2377 kfree(nvm_buffer, M_DEVBUF);
2378
2379 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2380 if (!sc->nvm_data)
2381 return EINVAL;
2382 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2383 "nvm version = %x\n", sc->nvm_data->nvm_version);
2384
2385 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2386 if (nvm_sections[i].data != NULL)
2387 kfree(nvm_sections[i].data, M_DEVBUF);
2388 }
2389
2390 return 0;
2391}
2392
2393/*
2394 * Firmware loading gunk. This is kind of a weird hybrid between the
2395 * iwn driver and the Linux iwlwifi driver.
2396 */
2397
2398static int
2399iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2400 const uint8_t *section, uint32_t byte_cnt)
2401{
2402 int error = EINVAL;
2403 uint32_t chunk_sz, offset;
2404
2405 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2406
2407 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2408 uint32_t addr, len;
2409 const uint8_t *data;
2410
2411 addr = dst_addr + offset;
2412 len = MIN(chunk_sz, byte_cnt - offset);
2413 data = section + offset;
2414
2415 error = iwm_firmware_load_chunk(sc, addr, data, len);
2416 if (error)
2417 break;
2418 }
2419
2420 return error;
2421}
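/*
 * A note on the chunking above (illustrative, not from the original
 * sources): a section no larger than IWM_FH_MEM_TB_MAX_LENGTH goes to
 * the device as a single iwm_firmware_load_chunk() call; a larger
 * section is split into IWM_FH_MEM_TB_MAX_LENGTH-sized pieces plus a
 * shorter tail, each DMA'd to dst_addr + offset.
 */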
2422
2423static int
2424iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2425 const uint8_t *chunk, uint32_t byte_cnt)
2426{
2427 struct iwm_dma_info *dma = &sc->fw_dma;
2428 int error;
2429
2430 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2431 memcpy(dma->vaddr, chunk, byte_cnt);
2432 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2433
2434 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2435 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2436 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2437 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2438 }
2439
2440 sc->sc_fw_chunk_done = 0;
2441
2442 if (!iwm_nic_lock(sc))
2443 return EBUSY;
2444
2445 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2446 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2447 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2448 dst_addr);
2449 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2450 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2451 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2452 (iwm_get_dma_hi_addr(dma->paddr)
2453 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2454 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2455 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2456 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2457 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2458 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2459 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2460 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2461 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2462
2463 iwm_nic_unlock(sc);
2464
2465 /* wait 1s for this segment to load */
2466 error = 0;
2467 while (!sc->sc_fw_chunk_done) {
2468#if defined(__DragonFly__)
2469 error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2470#else
2471 error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2472#endif
2473 if (error)
2474 break;
2475 }
2476
2477 if (!sc->sc_fw_chunk_done) {
2478 device_printf(sc->sc_dev,
2479 "fw chunk addr 0x%x len %d failed to load\n",
2480 dst_addr, byte_cnt);
2481 }
2482
2483 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2484 dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2485 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2486 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2487 iwm_nic_unlock(sc);
2488 }
2489
2490 return error;
2491}
2492
2493int
2494iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2495 int cpu, int *first_ucode_section)
2496{
2497 int shift_param;
2498 int i, error = 0, sec_num = 0x1;
2499 uint32_t val, last_read_idx = 0;
2500 const void *data;
2501 uint32_t dlen;
2502 uint32_t offset;
2503
2504 if (cpu == 1) {
2505 shift_param = 0;
2506 *first_ucode_section = 0;
2507 } else {
2508 shift_param = 16;
2509 (*first_ucode_section)++;
2510 }
2511
2512 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2513 last_read_idx = i;
2514 data = fws->fw_sect[i].fws_data;
2515 dlen = fws->fw_sect[i].fws_len;
2516 offset = fws->fw_sect[i].fws_devoff;
2517
2518 /*
2519		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2520		 * CPU1 sections from the CPU2 sections.
2521		 * The PAGING_SEPARATOR_SECTION delimiter separates the
2522		 * non-paged CPU2 sections from the CPU2 paging sections.
2523 */
2524 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2525 offset == IWM_PAGING_SEPARATOR_SECTION)
2526 break;
2527
2528 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2529 "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2530 i, offset, dlen, cpu);
2531
2532 if (dlen > sc->sc_fwdmasegsz) {
2533 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2534 "chunk %d too large (%d bytes)\n", i, dlen);
2535 error = EFBIG;
2536 } else {
2537 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2538 }
2539 if (error) {
2540 device_printf(sc->sc_dev,
2541 "could not load firmware chunk %d (error %d)\n",
2542 i, error);
2543 return error;
2544 }
2545
2546 /* Notify the ucode of the loaded section number and status */
2547 if (iwm_nic_lock(sc)) {
2548 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2549 val = val | (sec_num << shift_param);
2550 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2551 sec_num = (sec_num << 1) | 0x1;
2552 iwm_nic_unlock(sc);
2553
2554 /*
2555 * The firmware won't load correctly without this delay.
2556 */
2557 DELAY(8000);
2558 }
2559 }
2560
2561 *first_ucode_section = last_read_idx;
2562
2563 if (iwm_nic_lock(sc)) {
2564 if (cpu == 1)
2565 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2566 else
2567 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2568 iwm_nic_unlock(sc);
2569 }
2570
2571 return 0;
2572}
2573
2574int
2575iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2576{
2577 struct iwm_fw_sects *fws;
2578 int error = 0;
2579 int first_ucode_section;
2580
2581 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2582 ucode_type);
2583
2584 fws = &sc->sc_fw.fw_sects[ucode_type];
2585
2586 /* configure the ucode to be ready to get the secured image */
2587 /* release CPU reset */
2588 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2589
2590 /* load to FW the binary Secured sections of CPU1 */
2591 error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2592 if (error)
2593 return error;
2594
2595 /* load to FW the binary sections of CPU2 */
2596 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2597}
2598
2599static int
2600iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2601{
2602 struct iwm_fw_sects *fws;
2603 int error, i;
2604 const void *data;
2605 uint32_t dlen;
2606 uint32_t offset;
2607
2608 sc->sc_uc.uc_intr = 0;
2609
2610 fws = &sc->sc_fw.fw_sects[ucode_type];
2611 for (i = 0; i < fws->fw_count; i++) {
2612 data = fws->fw_sect[i].fws_data;
2613 dlen = fws->fw_sect[i].fws_len;
2614 offset = fws->fw_sect[i].fws_devoff;
2615 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2616 "LOAD FIRMWARE type %d offset %u len %d\n",
2617 ucode_type, offset, dlen);
2618 if (dlen > sc->sc_fwdmasegsz) {
2619 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2620 "chunk %d too large (%d bytes)\n", i, dlen);
2621 error = EFBIG;
2622 } else {
2623 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2624 }
2625 if (error) {
2626 device_printf(sc->sc_dev,
2627 "could not load firmware chunk %u of %u "
2628 "(error=%d)\n", i, fws->fw_count, error);
2629 return error;
2630 }
2631 }
2632
2633 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2634
2635 return 0;
2636}
2637
2638static int
2639iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2640{
2641 int error, w;
2642
2643 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2644 error = iwm_load_firmware_8000(sc, ucode_type);
2645 else
2646 error = iwm_load_firmware_7000(sc, ucode_type);
2647 if (error)
2648 return error;
2649
2650 /* wait for the firmware to load */
2651 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2652#if defined(__DragonFly__)
2653 error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2654#else
2655 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2656#endif
2657 }
2658 if (error || !sc->sc_uc.uc_ok) {
2659 device_printf(sc->sc_dev, "could not load firmware\n");
2660 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2661 device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2662 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2663 device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2664 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2665 }
2666 }
2667
2668 /*
2669 * Give the firmware some time to initialize.
2670 * Accessing it too early causes errors.
2671 */
2672 lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2673
2674 return error;
2675}
2676
2677static int
2678iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2679{
2680 int error;
2681
2682 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2683
2684 if ((error = iwm_nic_init(sc)) != 0) {
2685 device_printf(sc->sc_dev, "unable to init nic\n");
2686 return error;
2687 }
2688
2689 /* make sure rfkill handshake bits are cleared */
2690 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2691 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2692 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2693
2694 /* clear (again), then enable host interrupts */
2695 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2696 iwm_enable_interrupts(sc);
2697
2698 /* really make sure rfkill handshake bits are cleared */
2699 /* maybe we should write a few times more? just to make sure */
2700 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2701 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702
2703 /* Load the given image to the HW */
2704 return iwm_load_firmware(sc, ucode_type);
2705}
2706
2707static int
2708iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2709{
2710 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2711 .valid = htole32(valid_tx_ant),
2712 };
2713
2714 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2715 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2716}
2717
2718static int
2719iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2720{
2721 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2722 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2723
2724 /* Set parameters */
2725 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2726 phy_cfg_cmd.calib_control.event_trigger =
2727 sc->sc_default_calib[ucode_type].event_trigger;
2728 phy_cfg_cmd.calib_control.flow_trigger =
2729 sc->sc_default_calib[ucode_type].flow_trigger;
2730
2731 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2732 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2733 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2734 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2735}
2736
2737static int
2738iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2739 enum iwm_ucode_type ucode_type)
2740{
2741 enum iwm_ucode_type old_type = sc->sc_uc_current;
2742 int error;
2743
2744 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2745 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2746 error);
2747 return error;
2748 }
2749
2750 sc->sc_uc_current = ucode_type;
2751 error = iwm_start_fw(sc, ucode_type);
2752 if (error) {
2753 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2754 sc->sc_uc_current = old_type;
2755 return error;
2756 }
2757
2758 error = iwm_post_alive(sc);
2759 if (error) {
2760 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2761 }
2762 return error;
2763}
2764
2765/*
2766 * mvm misc bits
2767 */
2768
2769static int
2770iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2771{
2772 int error;
2773
2774 /* do not operate with rfkill switch turned on */
2775 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2776 device_printf(sc->sc_dev,
2777 "radio is disabled by hardware switch\n");
2778 return EPERM;
2779 }
2780
2781 sc->sc_init_complete = 0;
2782 if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2783 IWM_UCODE_TYPE_INIT)) != 0) {
2784 device_printf(sc->sc_dev, "failed to load init firmware\n");
2785 return error;
2786 }
2787
2788 if (justnvm) {
2789 if ((error = iwm_nvm_init(sc)) != 0) {
2790 device_printf(sc->sc_dev, "failed to read nvm\n");
2791 return error;
2792 }
2793 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2794
2795 return 0;
2796 }
2797
2798 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2799 device_printf(sc->sc_dev,
2800 "failed to send bt coex configuration: %d\n", error);
2801 return error;
2802 }
2803
2804 /* Init Smart FIFO. */
2805 error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2806 if (error != 0)
2807 return error;
2808
2809 /* Send TX valid antennas before triggering calibrations */
2810 if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2811 device_printf(sc->sc_dev,
2812 "failed to send antennas before calibration: %d\n", error);
2813 return error;
2814 }
2815
2816 /*
2817	 * Send the PHY configuration command to the init uCode
2818 * to start the 16.0 uCode init image internal calibrations.
2819 */
2820 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2821 device_printf(sc->sc_dev,
2822 "%s: failed to run internal calibration: %d\n",
2823 __func__, error);
2824 return error;
2825 }
2826
2827 /*
2828 * Nothing to do but wait for the init complete notification
2829 * from the firmware
2830 */
2831 while (!sc->sc_init_complete) {
2832#if defined(__DragonFly__)
2833 error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
2834 0, "iwminit", 2*hz);
2835#else
2836 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2837 0, "iwminit", 2*hz);
2838#endif
2839 if (error) {
2840 device_printf(sc->sc_dev, "init complete failed: %d\n",
2841 sc->sc_init_complete);
2842 break;
2843 }
2844 }
2845
2846 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2847 sc->sc_init_complete ? "" : "not ");
2848
2849 return error;
2850}
2851
2852/*
2853 * receive side
2854 */
2855
2856/* (re)stock rx ring, called at init-time and at runtime */
2857static int
2858iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2859{
2860 struct iwm_rx_ring *ring = &sc->rxq;
2861 struct iwm_rx_data *data = &ring->data[idx];
2862 struct mbuf *m;
2863 bus_dmamap_t dmamap = NULL;
2864 bus_dma_segment_t seg;
2865 int nsegs, error;
2866
2867 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2868 if (m == NULL)
2869 return ENOBUFS;
2870
2871 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2872#if defined(__DragonFly__)
2873 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
2874 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2875#else
2876 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2877 &seg, &nsegs, BUS_DMA_NOWAIT);
2878#endif
2879 if (error != 0) {
2880 device_printf(sc->sc_dev,
2881 "%s: can't map mbuf, error %d\n", __func__, error);
2882 goto fail;
2883 }
2884
2885 if (data->m != NULL)
2886 bus_dmamap_unload(ring->data_dmat, data->map);
2887
2888 /* Swap ring->spare_map with data->map */
2889 dmamap = data->map;
2890 data->map = ring->spare_map;
2891 ring->spare_map = dmamap;
2892
2893 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2894 data->m = m;
2895
2896 /* Update RX descriptor. */
2897 KKASSERT((seg.ds_addr & 255) == 0);
2898 ring->desc[idx] = htole32(seg.ds_addr >> 8);
2899 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2900 BUS_DMASYNC_PREWRITE);
2901
2902 return 0;
2903fail:
2904 m_freem(m);
2905 return error;
2906}
2907
2908#define IWM_RSSI_OFFSET 50
2909static int
2910iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2911{
2912 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2913 uint32_t agc_a, agc_b;
2914 uint32_t val;
2915
2916 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2917 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2918 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2919
2920 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2921 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2922 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2923
2924 /*
2925 * dBm = rssi dB - agc dB - constant.
2926 * Higher AGC (higher radio gain) means lower signal.
2927 */
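	/*
	 * Worked example with hypothetical values: rssi_a = 44 and
	 * agc_a = 30 give rssi_a_dbm = 44 - 50 - 30 = -36 dBm.
	 */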
2928 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2929 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2930 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2931
2932 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2933 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2934 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2935
2936 return max_rssi_dbm;
2937}
2938
2939/*
2940 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2941 * Energy values are reported by the fw as positive values - negate them
2942 * to obtain their dBm. Account for missing antennas by replacing 0
2943 * values with -256 dBm: practically zero power and an infeasible 8-bit value.
2944 */
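/*
 * Example with hypothetical values: energy_a = 40 becomes -40 dBm, while
 * a missing antenna reporting energy_c = 0 becomes the -256 dBm
 * placeholder, so max_energy picks -40.
 */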
2945static int
2946iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2947{
2948 int energy_a, energy_b, energy_c, max_energy;
2949 uint32_t val;
2950
2951 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2952 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2953 IWM_RX_INFO_ENERGY_ANT_A_POS;
2954 energy_a = energy_a ? -energy_a : -256;
2955 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2956 IWM_RX_INFO_ENERGY_ANT_B_POS;
2957 energy_b = energy_b ? -energy_b : -256;
2958 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2959 IWM_RX_INFO_ENERGY_ANT_C_POS;
2960 energy_c = energy_c ? -energy_c : -256;
2961 max_energy = MAX(energy_a, energy_b);
2962 max_energy = MAX(max_energy, energy_c);
2963
2964 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2965 "energy In A %d B %d C %d , and max %d\n",
2966 energy_a, energy_b, energy_c, max_energy);
2967
2968 return max_energy;
2969}
2970
2971static void
2972iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2973 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2974{
2975 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2976
2977 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2978 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2979
2980 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2981}
2982
2983/*
2984 * Retrieve the average noise (in dBm) among receivers.
2985 */
2986static int
2987iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2988{
2989 int i, total, nbant, noise;
2990
2991 total = nbant = noise = 0;
2992 for (i = 0; i < 3; i++) {
2993 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2994 if (noise) {
2995 total += noise;
2996 nbant++;
2997 }
2998 }
2999
3000 /* There should be at least one antenna but check anyway. */
3001 return (nbant == 0) ? -127 : (total / nbant) - 107;
3002}
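/*
 * Example with hypothetical statistics: beacon silence RSSI values of
 * 15, 20 and 0 give (15 + 20) / 2 - 107 = -90 dBm; the zero entry is
 * treated as a missing antenna and excluded from the average.
 */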
3003
3004/*
3005 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3006 *
3007 * Handles the actual data of the Rx packet from the fw
3008 */
3009static void
3010iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3011 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3012{
3013 struct ieee80211com *ic = &sc->sc_ic;
3014 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3015 struct ieee80211_frame *wh;
3016 struct ieee80211_node *ni;
3017 struct ieee80211_rx_stats rxs;
3018 struct mbuf *m;
3019 struct iwm_rx_phy_info *phy_info;
3020 struct iwm_rx_mpdu_res_start *rx_res;
3021 uint32_t len;
3022 uint32_t rx_pkt_status;
3023 int rssi;
3024
3025 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3026
3027 phy_info = &sc->sc_last_phy_info;
3028 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3029 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3030 len = le16toh(rx_res->byte_count);
3031 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3032
3033 m = data->m;
3034 m->m_data = pkt->data + sizeof(*rx_res);
3035 m->m_pkthdr.len = m->m_len = len;
3036
3037 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3038 device_printf(sc->sc_dev,
3039 "dsp size out of range [0,20]: %d\n",
3040 phy_info->cfg_phy_cnt);
3041 return;
3042 }
3043
3044 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3045 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3046 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3047 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3048 return; /* drop */
3049 }
3050
3051 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3052 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3053 } else {
3054 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3055 }
3056 rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
3057 rssi = MIN(rssi, sc->sc_max_rssi); /* clip to max. 100% */
3058
3059 /* replenish ring for the buffer we're going to feed to the sharks */
3060 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3061 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3062 __func__);
3063 return;
3064 }
3065
3066 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3067
3068 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3069 "%s: phy_info: channel=%d, flags=0x%08x\n",
3070 __func__,
3071 le16toh(phy_info->channel),
3072 le16toh(phy_info->phy_flags));
3073
3074 /*
3075 * Populate an RX state struct with the provided information.
3076 */
3077 bzero(&rxs, sizeof(rxs));
3078 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3079 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3080 rxs.c_ieee = le16toh(phy_info->channel);
3081	if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3082 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3083 } else {
3084 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3085 }
3086 rxs.rssi = rssi - sc->sc_noise;
3087 rxs.nf = sc->sc_noise;
3088
3089 if (ieee80211_radiotap_active_vap(vap)) {
3090 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3091
3092 tap->wr_flags = 0;
3093 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3094 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3095 tap->wr_chan_freq = htole16(rxs.c_freq);
3096 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3097 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3098 tap->wr_dbm_antsignal = (int8_t)rssi;
3099 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3100 tap->wr_tsft = phy_info->system_timestamp;
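		/*
		 * A reading aid for the switch below: the case labels are
		 * the raw rate values reported by the firmware (DSSS/CCK
		 * rates in 100 kb/s units, e.g. 10 = 1 Mb/s; OFDM as
		 * 802.11a PLCP RATE codes, e.g. 0xd = 6 Mb/s), while
		 * wr_rate is in net80211's 500 kb/s units.
		 */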
3101 switch (phy_info->rate) {
3102 /* CCK rates. */
3103 case 10: tap->wr_rate = 2; break;
3104 case 20: tap->wr_rate = 4; break;
3105 case 55: tap->wr_rate = 11; break;
3106 case 110: tap->wr_rate = 22; break;
3107 /* OFDM rates. */
3108 case 0xd: tap->wr_rate = 12; break;
3109 case 0xf: tap->wr_rate = 18; break;
3110 case 0x5: tap->wr_rate = 24; break;
3111 case 0x7: tap->wr_rate = 36; break;
3112 case 0x9: tap->wr_rate = 48; break;
3113 case 0xb: tap->wr_rate = 72; break;
3114 case 0x1: tap->wr_rate = 96; break;
3115 case 0x3: tap->wr_rate = 108; break;
3116 /* Unknown rate: should not happen. */
3117 default: tap->wr_rate = 0;
3118 }
3119 }
3120
3121 IWM_UNLOCK(sc);
3122 if (ni != NULL) {
3123 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3124 ieee80211_input_mimo(ni, m, &rxs);
3125 ieee80211_free_node(ni);
3126 } else {
3127 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3128 ieee80211_input_mimo_all(ic, m, &rxs);
3129 }
3130 IWM_LOCK(sc);
3131}
3132
3133static int
3134iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3135 struct iwm_node *in)
3136{
3137 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3138 struct ieee80211_node *ni = &in->in_ni;
3139 struct ieee80211vap *vap = ni->ni_vap;
3140 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3141 int failack = tx_resp->failure_frame;
3142
3143 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3144
3145 /* Update rate control statistics. */
3146 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3147 __func__,
3148 (int) le16toh(tx_resp->status.status),
3149 (int) le16toh(tx_resp->status.sequence),
3150 tx_resp->frame_count,
3151 tx_resp->bt_kill_count,
3152 tx_resp->failure_rts,
3153 tx_resp->failure_frame,
3154 le32toh(tx_resp->initial_rate),
3155 (int) le16toh(tx_resp->wireless_media_time));
3156
3157 if (status != IWM_TX_STATUS_SUCCESS &&
3158 status != IWM_TX_STATUS_DIRECT_DONE) {
3159 ieee80211_ratectl_tx_complete(vap, ni,
3160 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3161 return (1);
3162 } else {
3163 ieee80211_ratectl_tx_complete(vap, ni,
3164 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3165 return (0);
3166 }
3167}
3168
3169static void
3170iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3171 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3172{
3173 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3174 int idx = cmd_hdr->idx;
3175 int qid = cmd_hdr->qid;
3176 struct iwm_tx_ring *ring = &sc->txq[qid];
3177 struct iwm_tx_data *txd = &ring->data[idx];
3178 struct iwm_node *in = txd->in;
3179 struct mbuf *m = txd->m;
3180 int status;
3181
3182 KASSERT(txd->done == 0, ("txd not done"));
3183 KASSERT(txd->in != NULL, ("txd without node"));
3184 KASSERT(txd->m != NULL, ("txd without mbuf"));
3185
3186 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3187
3188 sc->sc_tx_timer = 0;
3189
3190 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3191
3192 /* Unmap and free mbuf. */
3193 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3194 bus_dmamap_unload(ring->data_dmat, txd->map);
3195
3196 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3197 "free txd %p, in %p\n", txd, txd->in);
3198 txd->done = 1;
3199 txd->m = NULL;
3200 txd->in = NULL;
3201
3202 ieee80211_tx_complete(&in->in_ni, m, status);
3203
3204 if (--ring->queued < IWM_TX_RING_LOMARK) {
3205 sc->qfullmsk &= ~(1 << ring->qid);
3206 if (sc->qfullmsk == 0) {
3207 /*
3208 * Well, we're in interrupt context, but then again
3209 * I guess net80211 does all sorts of stunts in
3210 * interrupt context, so maybe this is no biggie.
3211 */
3212 iwm_start(sc);
3213 }
3214 }
3215}
3216
3217/*
3218 * transmit side
3219 */
3220
3221/*
3222 * Process a "command done" firmware notification. This is where we wakeup
3223 * processes waiting for a synchronous command completion.
3224 * from if_iwn
3225 */
3226static void
3227iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3228{
3229 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3230 struct iwm_tx_data *data;
3231
3232 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3233 return; /* Not a command ack. */
3234 }
3235
3236 data = &ring->data[pkt->hdr.idx];
3237
3238 /* If the command was mapped in an mbuf, free it. */
3239 if (data->m != NULL) {
3240 bus_dmamap_sync(ring->data_dmat, data->map,
3241 BUS_DMASYNC_POSTWRITE);
3242 bus_dmamap_unload(ring->data_dmat, data->map);
3243 m_freem(data->m);
3244 data->m = NULL;
3245 }
3246 wakeup(&ring->desc[pkt->hdr.idx]);
3247}
3248
3249#if 0
3250/*
3251 * necessary only for block ack mode
3252 */
3253void
3254iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3255 uint16_t len)
3256{
3257 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3258 uint16_t w_val;
3259
3260 scd_bc_tbl = sc->sched_dma.vaddr;
3261
3262 len += 8; /* magic numbers came naturally from paris */
3263 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3264 len = roundup(len, 4) / 4;
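	/*
	 * Illustrative arithmetic with a hypothetical length: given the
	 * DW_BC_TABLE flag, len = 100 becomes 108 after the magic 8, and
	 * roundup(108, 4) / 4 = 27 dwords.
	 */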
3265
3266 w_val = htole16(sta_id << 12 | len);
3267
3268 /* Update TX scheduler. */
3269 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3270 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3271 BUS_DMASYNC_PREWRITE);
3272
3273 /* I really wonder what this is ?!? */
3274 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3275 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3276 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3277 BUS_DMASYNC_PREWRITE);
3278 }
3279}
3280#endif
3281
3282/*
3283 * Take an 802.11 (non-n) rate and find the relevant rate
3284 * table entry. Return the index into in_ridx[].
3285 *
3286 * The caller then uses that index back into in_ridx
3287 * to figure out the rate index programmed /into/
3288 * the firmware for this given node.
3289 */
3290static int
3291iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3292 uint8_t rate)
3293{
3294 int i;
3295 uint8_t r;
3296
3297 for (i = 0; i < nitems(in->in_ridx); i++) {
3298 r = iwm_rates[in->in_ridx[i]].rate;
3299 if (rate == r)
3300 return (i);
3301 }
3302 /* XXX Return the first */
3303 /* XXX TODO: have it return the /lowest/ */
3304 return (0);
3305}
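/*
 * Example with a hypothetical rate set: if in_ridx[] was programmed
 * with the entries for 1, 2, 5.5 and 11 Mb/s and ni_txrate is 11
 * (5.5 Mb/s in net80211's 500 kb/s units), the lookup returns index 2.
 */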
3306
3307/*
3308 * Fill in the rate related information for a transmit command.
3309 */
3310static const struct iwm_rate *
3311iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3312 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3313{
3314 struct ieee80211com *ic = &sc->sc_ic;
3315 struct ieee80211_node *ni = &in->in_ni;
3316 const struct iwm_rate *rinfo;
3317 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3318 int ridx, rate_flags;
3319
3320 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3321 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3322
3323 /*
3324 * XXX TODO: everything about the rate selection here is terrible!
3325 */
3326
3327 if (type == IEEE80211_FC0_TYPE_DATA) {
3328 int i;
3329 /* for data frames, use RS table */
3330 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3331 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3332 ridx = in->in_ridx[i];
3333
3334 /* This is the index into the programmed table */
3335 tx->initial_rate_index = i;
3336 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3337 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3338 "%s: start with i=%d, txrate %d\n",
3339 __func__, i, iwm_rates[ridx].rate);
3340 } else {
3341 /*
3342 * For non-data, use the lowest supported rate for the given
3343 * operational mode.
3344 *
3345 * Note: there may not be any rate control information available.
3346 * This driver currently assumes if we're transmitting data
3347 * frames, use the rate control table. Grr.
3348 *
3349 * XXX TODO: use the configured rate for the traffic type!
3350 * XXX TODO: this should be per-vap, not curmode; as we later
3351 * on we'll want to handle off-channel stuff (eg TDLS).
3352 */
3353 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3354 /*
3355 * XXX this assumes the mode is either 11a or not 11a;
3356 * definitely won't work for 11n.
3357 */
3358 ridx = IWM_RIDX_OFDM;
3359 } else {
3360 ridx = IWM_RIDX_CCK;
3361 }
3362 }
3363
3364 rinfo = &iwm_rates[ridx];
3365
3366 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3367 __func__, ridx,
3368 rinfo->rate,
3369 !! (IWM_RIDX_IS_CCK(ridx))
3370 );
3371
3372 /* XXX TODO: hard-coded TX antenna? */
3373 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3374 if (IWM_RIDX_IS_CCK(ridx))
3375 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3376 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3377
3378 return rinfo;
3379}
3380
3381#define TB0_SIZE 16
3382static int
3383iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3384{
3385 struct ieee80211com *ic = &sc->sc_ic;
3386 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3387 struct iwm_node *in = IWM_NODE(ni);
3388 struct iwm_tx_ring *ring;
3389 struct iwm_tx_data *data;
3390 struct iwm_tfd *desc;
3391 struct iwm_device_cmd *cmd;
3392 struct iwm_tx_cmd *tx;
3393 struct ieee80211_frame *wh;
3394 struct ieee80211_key *k = NULL;
3395#if !defined(__DragonFly__)
3396 struct mbuf *m1;
3397#endif
3398 const struct iwm_rate *rinfo;
3399 uint32_t flags;
3400 u_int hdrlen;
3401 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3402 int nsegs;
3403 uint8_t tid, type;
3404 int i, totlen, error, pad;
3405
3406 wh = mtod(m, struct ieee80211_frame *);
3407 hdrlen = ieee80211_anyhdrsize(wh);
3408 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3409 tid = 0;
3410 ring = &sc->txq[ac];
3411 desc = &ring->desc[ring->cur];
3412 memset(desc, 0, sizeof(*desc));
3413 data = &ring->data[ring->cur];
3414
3415 /* Fill out iwm_tx_cmd to send to the firmware */
3416 cmd = &ring->cmd[ring->cur];
3417 cmd->hdr.code = IWM_TX_CMD;
3418 cmd->hdr.flags = 0;
3419 cmd->hdr.qid = ring->qid;
3420 cmd->hdr.idx = ring->cur;
3421
3422 tx = (void *)cmd->data;
3423 memset(tx, 0, sizeof(*tx));
3424
3425 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3426
3427 /* Encrypt the frame if need be. */
3428 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3429 /* Retrieve key for TX && do software encryption. */
3430 k = ieee80211_crypto_encap(ni, m);
3431 if (k == NULL) {
3432 m_freem(m);
3433 return (ENOBUFS);
3434 }
3435 /* 802.11 header may have moved. */
3436 wh = mtod(m, struct ieee80211_frame *);
3437 }
3438
3439 if (ieee80211_radiotap_active_vap(vap)) {
3440 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3441
3442 tap->wt_flags = 0;
3443 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3444 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3445 tap->wt_rate = rinfo->rate;
3446 if (k != NULL)
3447 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3448 ieee80211_radiotap_tx(vap, m);
3449 }
3450
3451
3452 totlen = m->m_pkthdr.len;
3453
3454 flags = 0;
3455 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3456 flags |= IWM_TX_CMD_FLG_ACK;
3457 }
3458
3459 if (type == IEEE80211_FC0_TYPE_DATA
3460 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3461 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3462 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3463 }
3464
3465 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3466 type != IEEE80211_FC0_TYPE_DATA)
3467 tx->sta_id = sc->sc_aux_sta.sta_id;
3468 else
3469 tx->sta_id = IWM_STATION_ID;
3470
3471 if (type == IEEE80211_FC0_TYPE_MGT) {
3472 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3473
3474 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3475 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3476 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3477 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3478 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3479 } else {
3480 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3481 }
3482 } else {
3483 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3484 }
3485
3486 if (hdrlen & 3) {
3487 /* First segment length must be a multiple of 4. */
3488 flags |= IWM_TX_CMD_FLG_MH_PAD;
3489 pad = 4 - (hdrlen & 3);
3490 } else
3491 pad = 0;
3492
3493 tx->driver_txop = 0;
3494 tx->next_frame_len = 0;
3495
3496 tx->len = htole16(totlen);
3497 tx->tid_tspec = tid;
3498 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3499
3500 /* Set physical address of "scratch area". */
3501 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3502 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3503
3504 /* Copy 802.11 header in TX command. */
3505 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3506
3507 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3508
3509 tx->sec_ctl = 0;
3510 tx->tx_flags |= htole32(flags);
3511
3512 /* Trim 802.11 header. */
3513 m_adj(m, hdrlen);
3514#if defined(__DragonFly__)
3515 error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
3516 segs, IWM_MAX_SCATTER - 2,
3517 &nsegs, BUS_DMA_NOWAIT);
3518#else
3519 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3520 segs, &nsegs, BUS_DMA_NOWAIT);
3521#endif
3522 if (error != 0) {
3523#if defined(__DragonFly__)
3524 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3525 error);
3526 m_freem(m);
3527 return error;
3528#else
3529 if (error != EFBIG) {
3530 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3531 error);
3532 m_freem(m);
3533 return error;
3534 }
3535 /* Too many DMA segments, linearize mbuf. */
3536 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3537 if (m1 == NULL) {
3538 device_printf(sc->sc_dev,
3539 "%s: could not defrag mbuf\n", __func__);
3540 m_freem(m);
3541 return (ENOBUFS);
3542 }
3543 m = m1;
3544
3545 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3546 segs, &nsegs, BUS_DMA_NOWAIT);
3547 if (error != 0) {
3548 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3549 error);
3550 m_freem(m);
3551 return error;
3552 }
3553#endif
3554 }
3555 data->m = m;
3556 data->in = in;
3557 data->done = 0;
3558
3559 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3560 "sending txd %p, in %p\n", data, data->in);
3561 KASSERT(data->in != NULL, ("node is NULL"));
3562
3563 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3564 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3565 ring->qid, ring->cur, totlen, nsegs,
3566 le32toh(tx->tx_flags),
3567 le32toh(tx->rate_n_flags),
3568 tx->initial_rate_index
3569 );
3570
3571 /* Fill TX descriptor. */
3572 desc->num_tbs = 2 + nsegs;
3573
3574 desc->tbs[0].lo = htole32(data->cmd_paddr);
3575 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3576 (TB0_SIZE << 4);
3577 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3578 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3579 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3580 + hdrlen + pad - TB0_SIZE) << 4);
3581
3582 /* Other DMA segments are for data payload. */
3583 for (i = 0; i < nsegs; i++) {
3584 seg = &segs[i];
3585 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3586 desc->tbs[i+2].hi_n_len = \
3587 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3588 | ((seg->ds_len) << 4);
3589 }
3590
3591 bus_dmamap_sync(ring->data_dmat, data->map,
3592 BUS_DMASYNC_PREWRITE);
3593 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3594 BUS_DMASYNC_PREWRITE);
3595 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3596 BUS_DMASYNC_PREWRITE);
3597
3598#if 0
3599 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3600#endif
3601
3602 /* Kick TX ring. */
3603 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3604 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3605
3606 /* Mark TX ring as full if we reach a certain threshold. */
3607 if (++ring->queued > IWM_TX_RING_HIMARK) {
3608 sc->qfullmsk |= 1 << ring->qid;
3609 }
3610
3611 return 0;
3612}
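
/*
 * A minimal sketch (not compiled) of the hi_n_len packing used for the
 * TB entries above: the low nibble of the 16-bit field carries the upper
 * bits of the DMA address and the remaining 12 bits carry the buffer
 * length. The helper name is hypothetical.
 */
#if 0
static uint16_t
iwm_example_tb_hi_n_len(bus_addr_t paddr, uint16_t len)
{
	/* high address bits in bits 0..3, length in bits 4..15 */
	return htole16(iwm_get_dma_hi_addr(paddr) | (len << 4));
}
#endif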
3613
3614static int
3615iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3616 const struct ieee80211_bpf_params *params)
3617{
3618 struct ieee80211com *ic = ni->ni_ic;
3619 struct iwm_softc *sc = ic->ic_softc;
3620 int error = 0;
3621
3622 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3623 "->%s begin\n", __func__);
3624
3625 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3626 m_freem(m);
3627 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3628 "<-%s not RUNNING\n", __func__);
3629 return (ENETDOWN);
3630 }
3631
3632 IWM_LOCK(sc);
3633	/* XXX fix this - both branches make the same iwm_tx() call; params is ignored */
3634 if (params == NULL) {
3635 error = iwm_tx(sc, m, ni, 0);
3636 } else {
3637 error = iwm_tx(sc, m, ni, 0);
3638 }
3639 sc->sc_tx_timer = 5;
3640 IWM_UNLOCK(sc);
3641
3642 return (error);
3643}
3644
3645/*
3646 * mvm/tx.c
3647 */
3648
3649#if 0
3650/*
3651 * Note that there are transports that buffer frames before they reach
3652 * the firmware. This means that after flush_tx_path is called, the
3653 * queue might not be empty. The race-free way to handle this is to:
3654 * 1) set the station as draining
3655 * 2) flush the Tx path
3656 * 3) wait for the transport queues to be empty
3657 */
3658int
3659iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3660{
3661 struct iwm_tx_path_flush_cmd flush_cmd = {
3662 .queues_ctl = htole32(tfd_msk),
3663 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3664 };
3665 int ret;
3666
3667 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3668 sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3669 sizeof(flush_cmd), &flush_cmd);
3670 if (ret)
3671 device_printf(sc->sc_dev,
3672 "Flushing tx queue failed: %d\n", ret);
3673 return ret;
3674}
3675#endif
3676
3677static int
3678iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3679 struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3680{
3681 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3682 cmd, status);
3683}
3684
3685/* send station add/update command to firmware */
3686static int
3687iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3688{
3689 struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3690 int ret;
3691 uint32_t status;
3692
3693 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3694
3695 add_sta_cmd.sta_id = IWM_STATION_ID;
3696 add_sta_cmd.mac_id_n_color
3697 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3698 IWM_DEFAULT_COLOR));
3699 if (!update) {
3700 int ac;
3701 for (ac = 0; ac < WME_NUM_AC; ac++) {
3702 add_sta_cmd.tfd_queue_msk |=
3703 htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3704 }
3705 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3706 }
3707 add_sta_cmd.add_modify = update ? 1 : 0;
3708 add_sta_cmd.station_flags_msk
3709 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3710 add_sta_cmd.tid_disable_tx = htole16(0xffff);
3711 if (update)
3712 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3713
3714 status = IWM_ADD_STA_SUCCESS;
3715 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3716 if (ret)
3717 return ret;
3718
3719 switch (status) {
3720 case IWM_ADD_STA_SUCCESS:
3721 break;
3722 default:
3723 ret = EIO;
3724 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3725 break;
3726 }
3727
3728 return ret;
3729}
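
/*
 * A note on the tfd_queue_msk built above: one bit is set per WME access
 * category's TX FIFO. Assuming the four ACs map onto FIFOs 0..3 (as in
 * Linux iwlwifi), the initial mask works out to
 * (1<<0)|(1<<1)|(1<<2)|(1<<3) == 0xf.
 */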
3730
3731static int
3732iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3733{
3734 return iwm_mvm_sta_send_to_fw(sc, in, 0);
3735}
3736
3737static int
3738iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3739{
3740 return iwm_mvm_sta_send_to_fw(sc, in, 1);
3741}
3742
3743static int
3744iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3745 const uint8_t *addr, uint16_t mac_id, uint16_t color)
3746{
3747 struct iwm_mvm_add_sta_cmd_v7 cmd;
3748 int ret;
3749 uint32_t status;
3750
3751 memset(&cmd, 0, sizeof(cmd));
3752 cmd.sta_id = sta->sta_id;
3753 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3754
3755 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3756 cmd.tid_disable_tx = htole16(0xffff);
3757
3758 if (addr)
3759 IEEE80211_ADDR_COPY(cmd.addr, addr);
3760
3761 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3762 if (ret)
3763 return ret;
3764
3765 switch (status) {
3766 case IWM_ADD_STA_SUCCESS:
3767 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3768 "%s: Internal station added.\n", __func__);
3769 return 0;
3770 default:
3771 device_printf(sc->sc_dev,
3772 "%s: Add internal station failed, status=0x%x\n",
3773 __func__, status);
3774 ret = EIO;
3775 break;
3776 }
3777 return ret;
3778}
3779
3780static int
3781iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3782{
3783 int ret;
3784
3785 sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3786 sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3787
3788 ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3789 if (ret)
3790 return ret;
3791
3792 ret = iwm_mvm_add_int_sta_common(sc,
3793 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3794
3795 if (ret)
3796 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3797 return ret;
3798}
3799
3800static int
3801iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3802{
3803 struct iwm_time_quota_cmd cmd;
3804 int i, idx, ret, num_active_macs, quota, quota_rem;
3805 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3806 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3807 uint16_t id;
3808
3809 memset(&cmd, 0, sizeof(cmd));
3810
3811 /* currently, PHY ID == binding ID */
3812 if (in) {
3813 id = in->in_phyctxt->id;
3814 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3815 colors[id] = in->in_phyctxt->color;
3816
3817 if (1)
3818 n_ifs[id] = 1;
3819 }
3820
3821 /*
3822 * The FW's scheduling session consists of
3823 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3824 * equally between all the bindings that require quota
3825 */
3826 num_active_macs = 0;
3827 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3828 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3829 num_active_macs += n_ifs[i];
3830 }
3831
3832 quota = 0;
3833 quota_rem = 0;
3834 if (num_active_macs) {
3835 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3836 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3837 }
3838
3839 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3840 if (colors[i] < 0)
3841 continue;
3842
3843 cmd.quotas[idx].id_and_color =
3844 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3845
3846 if (n_ifs[i] <= 0) {
3847 cmd.quotas[idx].quota = htole32(0);
3848 cmd.quotas[idx].max_duration = htole32(0);
3849 } else {
3850 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3851 cmd.quotas[idx].max_duration = htole32(0);
3852 }
3853 idx++;
3854 }
3855
3856 /* Give the remainder of the session to the first binding */
3857 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3858
3859 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3860 sizeof(cmd), &cmd);
3861 if (ret)
3862 device_printf(sc->sc_dev,
3863 "%s: Failed to send quota: %d\n", __func__, ret);
3864 return ret;
3865}
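
/*
 * A minimal sketch (not compiled) of the quota split performed above;
 * the helper name is hypothetical and the example values assume
 * IWM_MVM_MAX_QUOTA == 128. The remainder is later added back to the
 * first binding so the session stays fully allocated.
 */
#if 0
static void
iwm_example_quota_split(int num_active_macs, int *quota, int *quota_rem)
{
	*quota = IWM_MVM_MAX_QUOTA / num_active_macs;	  /* e.g. 128 / 3 == 42 */
	*quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs; /* e.g. 128 % 3 == 2 */
}
#endif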
3866
3867/*
3868 * ieee80211 routines
3869 */
3870
3871/*
3872 * Change to AUTH state in 80211 state machine. Roughly matches what
3873 * Linux does in bss_info_changed().
3874 */
3875static int
3876iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3877{
3878 struct ieee80211_node *ni;
3879 struct iwm_node *in;
3880 struct iwm_vap *iv = IWM_VAP(vap);
3881 uint32_t duration;
3882 int error;
3883
3884 /*
3885	 * XXX I have a feeling that the vap node is being
3886 * freed from underneath us. Grr.
3887 */
3888 ni = ieee80211_ref_node(vap->iv_bss);
3889 in = IWM_NODE(ni);
3890 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3891 "%s: called; vap=%p, bss ni=%p\n",
3892 __func__,
3893 vap,
3894 ni);
3895
3896 in->in_assoc = 0;
3897
3898 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3899 if (error != 0)
3900 return error;
3901
3902 error = iwm_allow_mcast(vap, sc);
3903 if (error) {
3904 device_printf(sc->sc_dev,
3905 "%s: failed to set multicast\n", __func__);
3906 goto out;
3907 }
3908
3909 /*
3910 * This is where it deviates from what Linux does.
3911 *
3912 * Linux iwlwifi doesn't reset the nic each time, nor does it
3913 * call ctxt_add() here. Instead, it adds it during vap creation,
3914 * and always does a mac_ctx_changed().
3915 *
3916	 * The OpenBSD port doesn't attempt to do that - it resets things
3917	 * at odd states and does the add here.
3918	 *
3919	 * So, until the state handling is fixed (i.e., we never reset
3920 * the NIC except for a firmware failure, which should drag
3921 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3922 * contexts that are required), let's do a dirty hack here.
3923 */
3924 if (iv->is_uploaded) {
3925 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3926 device_printf(sc->sc_dev,
3927 "%s: failed to update MAC\n", __func__);
3928 goto out;
3929 }
3930 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3931 in->in_ni.ni_chan, 1, 1)) != 0) {
3932 device_printf(sc->sc_dev,
3933 "%s: failed update phy ctxt\n", __func__);
3934 goto out;
3935 }
3936 in->in_phyctxt = &sc->sc_phyctxt[0];
3937
3938 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3939 device_printf(sc->sc_dev,
3940 "%s: binding update cmd\n", __func__);
3941 goto out;
3942 }
3943 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3944 device_printf(sc->sc_dev,
3945 "%s: failed to update sta\n", __func__);
3946 goto out;
3947 }
3948 } else {
3949 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3950 device_printf(sc->sc_dev,
3951 "%s: failed to add MAC\n", __func__);
3952 goto out;
3953 }
3954 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3955 in->in_ni.ni_chan, 1, 1)) != 0) {
3956 device_printf(sc->sc_dev,
3957 "%s: failed add phy ctxt!\n", __func__);
3958 error = ETIMEDOUT;
3959 goto out;
3960 }
3961 in->in_phyctxt = &sc->sc_phyctxt[0];
3962
3963 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3964 device_printf(sc->sc_dev,
3965 "%s: binding add cmd\n", __func__);
3966 goto out;
3967 }
3968 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3969 device_printf(sc->sc_dev,
3970 "%s: failed to add sta\n", __func__);
3971 goto out;
3972 }
3973 }
3974
3975 /*
3976 * Prevent the FW from wandering off channel during association
3977 * by "protecting" the session with a time event.
3978 */
3979 /* XXX duration is in units of TU, not MS */
3980 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3981 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3982 DELAY(100);
3983
3984 error = 0;
3985out:
3986 ieee80211_free_node(ni);
3987 return (error);
3988}
3989
3990static int
3991iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3992{
3993 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3994 int error;
3995
3996 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3997 device_printf(sc->sc_dev,
3998 "%s: failed to update STA\n", __func__);
3999 return error;
4000 }
4001
4002 in->in_assoc = 1;
4003 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4004 device_printf(sc->sc_dev,
4005 "%s: failed to update MAC\n", __func__);
4006 return error;
4007 }
4008
4009 return 0;
4010}
4011
4012static int
4013iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4014{
4015 /*
4016 * Ok, so *technically* the proper set of calls for going
4017 * from RUN back to SCAN is:
4018 *
4019 * iwm_mvm_power_mac_disable(sc, in);
4020 * iwm_mvm_mac_ctxt_changed(sc, in);
4021 * iwm_mvm_rm_sta(sc, in);
4022 * iwm_mvm_update_quotas(sc, NULL);
4023 * iwm_mvm_mac_ctxt_changed(sc, in);
4024 * iwm_mvm_binding_remove_vif(sc, in);
4025 * iwm_mvm_mac_ctxt_remove(sc, in);
4026 *
4027	 * However, that freezes the device no matter which permutations
4028	 * and modifications are attempted. Obviously, this driver is missing
4029	 * something since it works in the Linux driver, but figuring out what
4030	 * is missing is a little more complicated. Now, since we're going
4031	 * back to nothing anyway, we'll just do a complete device reset.
4032	 * Up yours, device!
4033 */
4034 /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
4035 iwm_stop_device(sc);
4036 iwm_init_hw(sc);
4037 if (in)
4038 in->in_assoc = 0;
4039 return 0;
4040
4041#if 0
4042 int error;
4043
4044 iwm_mvm_power_mac_disable(sc, in);
4045
4046 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4047 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4048 return error;
4049 }
4050
4051 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4052 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4053 return error;
4054 }
4056 in->in_assoc = 0;
4057 iwm_mvm_update_quotas(sc, NULL);
4058 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4059 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4060 return error;
4061 }
4062 iwm_mvm_binding_remove_vif(sc, in);
4063
4064 iwm_mvm_mac_ctxt_remove(sc, in);
4065
4066 return error;
4067#endif
4068}
4069
4070static struct ieee80211_node *
4071iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4072{
4073 return kmalloc(sizeof (struct iwm_node), M_80211_NODE,
4074 M_INTWAIT | M_ZERO);
4075}
4076
4077static void
4078iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4079{
4080 struct ieee80211_node *ni = &in->in_ni;
4081 struct iwm_lq_cmd *lq = &in->in_lq;
4082 int nrates = ni->ni_rates.rs_nrates;
4083 int i, ridx, tab = 0;
4084 int txant = 0;
4085
4086 if (nrates > nitems(lq->rs_table)) {
4087 device_printf(sc->sc_dev,
4088 "%s: node supports %d rates, driver handles "
4089 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4090 return;
4091 }
4092 if (nrates == 0) {
4093 device_printf(sc->sc_dev,
4094 "%s: node supports 0 rates, odd!\n", __func__);
4095 return;
4096 }
4097
4098 /*
4099	 * XXX ... and most of iwm_node is not initialised explicitly;
4100 * it's all just 0x0 passed to the firmware.
4101 */
4102
4103 /* first figure out which rates we should support */
4104 /* XXX TODO: this isn't 11n aware /at all/ */
4105 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4106 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4107 "%s: nrates=%d\n", __func__, nrates);
4108
4109 /*
4110 * Loop over nrates and populate in_ridx from the highest
4111 * rate to the lowest rate. Remember, in_ridx[] has
4112 * IEEE80211_RATE_MAXSIZE entries!
4113 */
4114 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4115 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4116
4117 /* Map 802.11 rate to HW rate index. */
4118 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4119 if (iwm_rates[ridx].rate == rate)
4120 break;
4121 if (ridx > IWM_RIDX_MAX) {
4122 device_printf(sc->sc_dev,
4123 "%s: WARNING: device rate for %d not found!\n",
4124 __func__, rate);
4125 } else {
4126 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4127 "%s: rate: i: %d, rate=%d, ridx=%d\n",
4128 __func__,
4129 i,
4130 rate,
4131 ridx);
4132 in->in_ridx[i] = ridx;
4133 }
4134 }
4135
4136 /* then construct a lq_cmd based on those */
4137 memset(lq, 0, sizeof(*lq));
4138 lq->sta_id = IWM_STATION_ID;
4139
4140 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4141 if (ni->ni_flags & IEEE80211_NODE_HT)
4142 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4143
4144 /*
4145	 * Are these used? (We don't do SISO or MIMO.)
4146	 * They need to be set to non-zero, though, or we get an error.
4147 */
4148 lq->single_stream_ant_msk = 1;
4149 lq->dual_stream_ant_msk = 1;
4150
4151 /*
4152 * Build the actual rate selection table.
4153 * The lowest bits are the rates. Additionally,
4154	 * CCK needs bit 9 to be set. The rest of the bits
4155	 * we add to the table select the TX antenna.
4156	 * Note that we add the rates with the highest rate first
4157	 * (the opposite of ni_rates).
4158 */
4159 /*
4160 * XXX TODO: this should be looping over the min of nrates
4161 * and LQ_MAX_RETRY_NUM. Sigh.
4162 */
4163 for (i = 0; i < nrates; i++) {
4164 int nextant;
4165
4166 if (txant == 0)
4167 txant = iwm_fw_valid_tx_ant(sc);
4168 nextant = 1<<(ffs(txant)-1);
4169 txant &= ~nextant;
4170
4171 /*
4172 * Map the rate id into a rate index into
4173 * our hardware table containing the
4174 * configuration to use for this rate.
4175 */
4176 ridx = in->in_ridx[i];
4177 tab = iwm_rates[ridx].plcp;
4178 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4179 if (IWM_RIDX_IS_CCK(ridx))
4180 tab |= IWM_RATE_MCS_CCK_MSK;
4181 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4182 "station rate i=%d, rate=%d, hw=%x\n",
4183 i, iwm_rates[ridx].rate, tab);
4184 lq->rs_table[i] = htole32(tab);
4185 }
4186 /* then fill the rest with the lowest possible rate */
4187 for (i = nrates; i < nitems(lq->rs_table); i++) {
4188 KASSERT(tab != 0, ("invalid tab"));
4189 lq->rs_table[i] = htole32(tab);
4190 }
4191}
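
/*
 * A minimal sketch (not compiled) of one rs_table entry constructed in
 * iwm_setrates() above; the helper name is hypothetical.
 */
#if 0
static uint32_t
iwm_example_rate_entry(int ridx, int ant)
{
	uint32_t tab = iwm_rates[ridx].plcp;	/* PLCP value in the low bits */

	tab |= ant << IWM_RATE_MCS_ANT_POS;	/* TX antenna selection */
	if (IWM_RIDX_IS_CCK(ridx))
		tab |= IWM_RATE_MCS_CCK_MSK;	/* CCK rates need this bit */
	return tab;				/* stored via htole32() */
}
#endif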
4192
4193static int
4194iwm_media_change(struct ifnet *ifp)
4195{
4196 struct ieee80211vap *vap = ifp->if_softc;
4197 struct ieee80211com *ic = vap->iv_ic;
4198 struct iwm_softc *sc = ic->ic_softc;
4199 int error;
4200
4201 error = ieee80211_media_change(ifp);
4202 if (error != ENETRESET)
4203 return error;
4204
4205 IWM_LOCK(sc);
4206 if (ic->ic_nrunning > 0) {
4207 iwm_stop(sc);
4208 iwm_init(sc);
4209 }
4210 IWM_UNLOCK(sc);
4211 return error;
4212}
4213
4214
4215static int
4216iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4217{
4218 struct iwm_vap *ivp = IWM_VAP(vap);
4219 struct ieee80211com *ic = vap->iv_ic;
4220 struct iwm_softc *sc = ic->ic_softc;
4221 struct iwm_node *in;
4222 int error;
4223
4224 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4225 "switching state %s -> %s\n",
4226 ieee80211_state_name[vap->iv_state],
4227 ieee80211_state_name[nstate]);
4228 IEEE80211_UNLOCK(ic);
4229 IWM_LOCK(sc);
4230
4231 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4232 iwm_led_blink_stop(sc);
4233
4234 /* disable beacon filtering if we're hopping out of RUN */
4235 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4236 iwm_mvm_disable_beacon_filter(sc);
4237
4238 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4239 in->in_assoc = 0;
4240
4241 iwm_release(sc, NULL);
4242
4243 /*
4244 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4245 * above then the card will be completely reinitialized,
4246 * so the driver must do everything necessary to bring the card
4247 * from INIT to SCAN.
4248 *
4249 * Additionally, upon receiving deauth frame from AP,
4250 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4251 * state. This will also fail with this driver, so bring the FSM
4252 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4253 *
4254 * XXX TODO: fix this for FreeBSD!
4255 */
4256 if (nstate == IEEE80211_S_SCAN ||
4257 nstate == IEEE80211_S_AUTH ||
4258 nstate == IEEE80211_S_ASSOC) {
4259 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4260 "Force transition to INIT; MGT=%d\n", arg);
4261 IWM_UNLOCK(sc);
4262 IEEE80211_LOCK(ic);
4263 /* Always pass arg as -1 since we can't Tx right now. */
4264 /*
4265 * XXX arg is just ignored anyway when transitioning
4266 * to IEEE80211_S_INIT.
4267 */
4268 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4269 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4270 "Going INIT->SCAN\n");
4271 nstate = IEEE80211_S_SCAN;
4272 IEEE80211_UNLOCK(ic);
4273 IWM_LOCK(sc);
4274 }
4275 }
4276
4277 switch (nstate) {
4278 case IEEE80211_S_INIT:
4279 break;
4280
4281 case IEEE80211_S_AUTH:
4282 if ((error = iwm_auth(vap, sc)) != 0) {
4283 device_printf(sc->sc_dev,
4284 "%s: could not move to auth state: %d\n",
4285 __func__, error);
4286 break;
4287 }
4288 break;
4289
4290 case IEEE80211_S_ASSOC:
4291 if ((error = iwm_assoc(vap, sc)) != 0) {
4292 device_printf(sc->sc_dev,
4293 "%s: failed to associate: %d\n", __func__,
4294 error);
4295 break;
4296 }
4297 break;
4298
4299 case IEEE80211_S_RUN:
4300 {
4301 struct iwm_host_cmd cmd = {
4302 .id = IWM_LQ_CMD,
4303 .len = { sizeof(in->in_lq), },
4304 .flags = IWM_CMD_SYNC,
4305 };
4306
4307 /* Update the association state, now we have it all */
4308		/* (e.g. the associd comes in at this point). */
4309 error = iwm_assoc(vap, sc);
4310 if (error != 0) {
4311 device_printf(sc->sc_dev,
4312 "%s: failed to update association state: %d\n",
4313 __func__,
4314 error);
4315 break;
4316 }
4317
4318 in = IWM_NODE(vap->iv_bss);
4319 iwm_mvm_power_mac_update_mode(sc, in);
4320 iwm_mvm_enable_beacon_filter(sc, in);
4321 iwm_mvm_update_quotas(sc, in);
4322 iwm_setrates(sc, in);
4323
4324 cmd.data[0] = &in->in_lq;
4325 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4326 device_printf(sc->sc_dev,
4327 "%s: IWM_LQ_CMD failed\n", __func__);
4328 }
4329
4330 iwm_mvm_led_enable(sc);
4331 break;
4332 }
4333
4334 default:
4335 break;
4336 }
4337 IWM_UNLOCK(sc);
4338 IEEE80211_LOCK(ic);
4339
4340 return (ivp->iv_newstate(vap, nstate, arg));
4341}
4342
4343void
4344iwm_endscan_cb(void *arg, int pending)
4345{
4346 struct iwm_softc *sc = arg;
4347 struct ieee80211com *ic = &sc->sc_ic;
4348
4349 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4350 "%s: scan ended\n",
4351 __func__);
4352
4353 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4354}
4355
4356/*
4357 * Aging and idle timeouts for the different possible scenarios
4358 * in default configuration
4359 */
4360static const uint32_t
4361iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4362 {
4363 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4364 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4365 },
4366 {
4367 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4368 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4369 },
4370 {
4371 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4372 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4373 },
4374 {
4375 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4376 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4377 },
4378 {
4379 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4380 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4381 },
4382};
4383
4384/*
4385 * Aging and idle timeouts for the different possible scenarios
4386 * in single BSS MAC configuration.
4387 */
4388static const uint32_t
4389iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4390 {
4391 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4392 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4393 },
4394 {
4395 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4396 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4397 },
4398 {
4399 htole32(IWM_SF_MCAST_AGING_TIMER),
4400 htole32(IWM_SF_MCAST_IDLE_TIMER)
4401 },
4402 {
4403 htole32(IWM_SF_BA_AGING_TIMER),
4404 htole32(IWM_SF_BA_IDLE_TIMER)
4405 },
4406 {
4407 htole32(IWM_SF_TX_RE_AGING_TIMER),
4408 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4409 },
4410};
4411
4412static void
4413iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4414 struct ieee80211_node *ni)
4415{
4416 int i, j, watermark;
4417
4418 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4419
4420 /*
4421 * If we are in association flow - check antenna configuration
4422 * capabilities of the AP station, and choose the watermark accordingly.
4423 */
4424 if (ni) {
4425 if (ni->ni_flags & IEEE80211_NODE_HT) {
4426#ifdef notyet
4427 if (ni->ni_rxmcs[2] != 0)
4428 watermark = IWM_SF_W_MARK_MIMO3;
4429 else if (ni->ni_rxmcs[1] != 0)
4430 watermark = IWM_SF_W_MARK_MIMO2;
4431 else
4432#endif
4433 watermark = IWM_SF_W_MARK_SISO;
4434 } else {
4435 watermark = IWM_SF_W_MARK_LEGACY;
4436 }
4437	} else {
4438		/* Default watermark value for unassociated mode. */
4439		watermark = IWM_SF_W_MARK_MIMO2;
4440 }
4441 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4442
4443 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4444 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4445 sf_cmd->long_delay_timeouts[i][j] =
4446 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4447 }
4448 }
4449
4450 if (ni) {
4451 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4452 sizeof(iwm_sf_full_timeout));
4453 } else {
4454 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4455 sizeof(iwm_sf_full_timeout_def));
4456 }
4457}
4458
4459static int
4460iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4461{
4462 struct ieee80211com *ic = &sc->sc_ic;
4463 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4464 struct iwm_sf_cfg_cmd sf_cmd = {
4465 .state = htole32(IWM_SF_FULL_ON),
4466 };
4467 int ret = 0;
4468
4469 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4470 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4471
4472 switch (new_state) {
4473 case IWM_SF_UNINIT:
4474 case IWM_SF_INIT_OFF:
4475 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4476 break;
4477 case IWM_SF_FULL_ON:
4478 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4479 break;
4480 default:
4481 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4482 "Invalid state: %d. not sending Smart Fifo cmd\n",
4483 new_state);
4484 return EINVAL;
4485 }
4486
4487 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4488 sizeof(sf_cmd), &sf_cmd);
4489 return ret;
4490}
4491
4492static int
4493iwm_send_bt_init_conf(struct iwm_softc *sc)
4494{
4495 struct iwm_bt_coex_cmd bt_cmd;
4496
4497 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4498 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4499
4500 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4501 &bt_cmd);
4502}
4503
4504static int
4505iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4506{
4507 struct iwm_mcc_update_cmd mcc_cmd;
4508 struct iwm_host_cmd hcmd = {
4509 .id = IWM_MCC_UPDATE_CMD,
4510 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4511 .data = { &mcc_cmd },
4512 };
4513 int ret;
4514#ifdef IWM_DEBUG
4515 struct iwm_rx_packet *pkt;
4516 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4517 struct iwm_mcc_update_resp *mcc_resp;
4518 int n_channels;
4519 uint16_t mcc;
4520#endif
4521 int resp_v2 = isset(sc->sc_enabled_capa,
4522 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4523
4524 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4525 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4526 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4527 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4528 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4529 else
4530 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4531
4532 if (resp_v2)
4533 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4534 else
4535 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4536
4537 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4538 "send MCC update to FW with '%c%c' src = %d\n",
4539 alpha2[0], alpha2[1], mcc_cmd.source_id);
4540
4541 ret = iwm_send_cmd(sc, &hcmd);
4542 if (ret)
4543 return ret;
4544
4545#ifdef IWM_DEBUG
4546 pkt = hcmd.resp_pkt;
4547
4548 /* Extract MCC response */
4549 if (resp_v2) {
4550 mcc_resp = (void *)pkt->data;
4551 mcc = mcc_resp->mcc;
4552 n_channels = le32toh(mcc_resp->n_channels);
4553 } else {
4554 mcc_resp_v1 = (void *)pkt->data;
4555 mcc = mcc_resp_v1->mcc;
4556 n_channels = le32toh(mcc_resp_v1->n_channels);
4557 }
4558
4559 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4560 if (mcc == 0)
4561 mcc = 0x3030; /* "00" - world */
4562
4563 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4564 "regulatory domain '%c%c' (%d channels available)\n",
4565 mcc >> 8, mcc & 0xff, n_channels);
4566#endif
4567 iwm_free_resp(sc, &hcmd);
4568
4569 return 0;
4570}
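
/*
 * A minimal sketch (not compiled) of the MCC packing used above: the
 * two-character country code is packed big-endian into 16 bits, so "ZZ"
 * packs to 0x5a5a, and the firmware's world domain 0x3030 unpacks back
 * to "00". The helper name is hypothetical.
 */
#if 0
static uint16_t
iwm_example_pack_mcc(const char *alpha2)
{
	return (alpha2[0] << 8) | alpha2[1];
}
#endif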
4571
4572static void
4573iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4574{
4575 struct iwm_host_cmd cmd = {
4576 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4577 .len = { sizeof(uint32_t), },
4578 .data = { &backoff, },
4579 };
4580
4581 if (iwm_send_cmd(sc, &cmd) != 0) {
4582 device_printf(sc->sc_dev,
4583 "failed to change thermal tx backoff\n");
4584 }
4585}
4586
4587static int
4588iwm_init_hw(struct iwm_softc *sc)
4589{
4590 struct ieee80211com *ic = &sc->sc_ic;
4591 int error, i, ac;
4592
4593 if ((error = iwm_start_hw(sc)) != 0) {
4594 kprintf("iwm_start_hw: failed %d\n", error);
4595 return error;
4596 }
4597
4598 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4599 kprintf("iwm_run_init_mvm_ucode: failed %d\n", error);
4600 return error;
4601 }
4602
4603 /*
4604	 * We should stop and restart the HW since the INIT
4605	 * image has just been loaded.
4606 */
4607 iwm_stop_device(sc);
4608 if ((error = iwm_start_hw(sc)) != 0) {
4609 device_printf(sc->sc_dev, "could not initialize hardware\n");
4610 return error;
4611 }
4612
4613	/* restart, this time with the regular firmware */
4614 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4615 if (error) {
4616 device_printf(sc->sc_dev, "could not load firmware\n");
4617 goto error;
4618 }
4619
4620 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4621 device_printf(sc->sc_dev, "bt init conf failed\n");
4622 goto error;
4623 }
4624
4625 if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4626 device_printf(sc->sc_dev, "antenna config failed\n");
4627 goto error;
4628 }
4629
4630 /* Send phy db control command and then phy db calibration */
4631 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4632 goto error;
4633
4634 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4635 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4636 goto error;
4637 }
4638
4639 /* Add auxiliary station for scanning */
4640 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4641 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4642 goto error;
4643 }
4644
4645 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4646 /*
4647 * The channel used here isn't relevant as it's
4648 * going to be overwritten in the other flows.
4649 * For now use the first channel we have.
4650 */
4651 if ((error = iwm_mvm_phy_ctxt_add(sc,
4652 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4653 goto error;
4654 }
4655
4656 /* Initialize tx backoffs to the minimum. */
4657 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4658 iwm_mvm_tt_tx_backoff(sc, 0);
4659
4660 error = iwm_mvm_power_update_device(sc);
4661 if (error)
4662 goto error;
4663
4664 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4665 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4666 goto error;
4667 }
4668
4669 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4670 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4671 goto error;
4672 }
4673
4674 /* Enable Tx queues. */
4675 for (ac = 0; ac < WME_NUM_AC; ac++) {
4676 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4677 iwm_mvm_ac_to_tx_fifo[ac]);
4678 if (error)
4679 goto error;
4680 }
4681
4682 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4683 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4684 goto error;
4685 }
4686
4687 return 0;
4688
4689 error:
4690 iwm_stop_device(sc);
4691 return error;
4692}
4693
4694/* Allow multicast from our BSSID. */
4695static int
4696iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4697{
4698 struct ieee80211_node *ni = vap->iv_bss;
4699 struct iwm_mcast_filter_cmd *cmd;
4700 size_t size;
4701 int error;
4702
4703 size = roundup(sizeof(*cmd), 4);
4704 cmd = kmalloc(size, M_DEVBUF, M_INTWAIT | M_ZERO);
4705 if (cmd == NULL)
4706 return ENOMEM;
4707 cmd->filter_own = 1;
4708 cmd->port_id = 0;
4709 cmd->count = 0;
4710 cmd->pass_all = 1;
4711 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4712
4713 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4714 IWM_CMD_SYNC, size, cmd);
4715 kfree(cmd, M_DEVBUF);
4716
4717 return (error);
4718}
4719
4720/*
4721 * ifnet interfaces
4722 */
4723
4724static void
4725iwm_init(struct iwm_softc *sc)
4726{
4727 int error;
4728
4729 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4730 return;
4731 }
4732 sc->sc_generation++;
4733 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4734
4735 if ((error = iwm_init_hw(sc)) != 0) {
4736 kprintf("iwm_init_hw failed %d\n", error);
4737 iwm_stop(sc);
4738 return;
4739 }
4740
4741 /*
4742 * Ok, firmware loaded and we are jogging
4743 */
4744 sc->sc_flags |= IWM_FLAG_HW_INITED;
4745 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4746}
4747
4748static int
4749iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4750{
4751 struct iwm_softc *sc;
4752 int error;
4753
4754 sc = ic->ic_softc;
4755
4756 IWM_LOCK(sc);
4757 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4758 IWM_UNLOCK(sc);
4759 return (ENXIO);
4760 }
4761 error = mbufq_enqueue(&sc->sc_snd, m);
4762 if (error) {
4763 IWM_UNLOCK(sc);
4764 return (error);
4765 }
4766 iwm_start(sc);
4767 IWM_UNLOCK(sc);
4768 return (0);
4769}
4770
4771/*
4772 * Dequeue packets from sendq and call send.
4773 */
4774static void
4775iwm_start(struct iwm_softc *sc)
4776{
4777 struct ieee80211_node *ni;
4778 struct mbuf *m;
4779 int ac = 0;
4780
4781 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4782 while (sc->qfullmsk == 0 &&
4783 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4784 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4785 if (iwm_tx(sc, m, ni, ac) != 0) {
4786 if_inc_counter(ni->ni_vap->iv_ifp,
4787 IFCOUNTER_OERRORS, 1);
4788 ieee80211_free_node(ni);
4789 continue;
4790 }
4791 sc->sc_tx_timer = 15;
4792 }
4793 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4794}
4795
4796static void
4797iwm_stop(struct iwm_softc *sc)
4798{
4799
4800 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4801 sc->sc_flags |= IWM_FLAG_STOPPED;
4802 sc->sc_generation++;
4803 iwm_led_blink_stop(sc);
4804 sc->sc_tx_timer = 0;
4805 iwm_stop_device(sc);
4806}
4807
4808static void
4809iwm_watchdog(void *arg)
4810{
4811 struct iwm_softc *sc = arg;
4812
4813 if (sc->sc_tx_timer > 0) {
4814 if (--sc->sc_tx_timer == 0) {
4815 device_printf(sc->sc_dev, "device timeout\n");
4816#ifdef IWM_DEBUG
4817 iwm_nic_error(sc);
4818#endif
4819 iwm_stop(sc);
4820#if defined(__DragonFly__)
4821 ++sc->sc_ic.ic_oerrors;
4822#else
4823 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4824#endif
4825 return;
4826 }
4827 }
4828 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4829}
4830
4831static void
4832iwm_parent(struct ieee80211com *ic)
4833{
4834 struct iwm_softc *sc = ic->ic_softc;
4835 int startall = 0;
4836
4837 IWM_LOCK(sc);
4838 if (ic->ic_nrunning > 0) {
4839 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4840 iwm_init(sc);
4841 startall = 1;
4842 }
4843 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4844 iwm_stop(sc);
4845 IWM_UNLOCK(sc);
4846 if (startall)
4847 ieee80211_start_all(ic);
4848}
4849
4850/*
4851 * The interrupt side of things
4852 */
4853
4854/*
4855 * error dumping routines are from iwlwifi/mvm/utils.c
4856 */
4857
4858/*
4859 * Note: This structure is read from the device with IO accesses,
4860 * and the reading already does the endian conversion. As it is
4861 * read with uint32_t-sized accesses, any members with a different size
4862 * need to be ordered correctly though!
4863 */
4864struct iwm_error_event_table {
4865 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4866 uint32_t error_id; /* type of error */
4867 uint32_t trm_hw_status0; /* TRM HW status */
4868 uint32_t trm_hw_status1; /* TRM HW status */
4869 uint32_t blink2; /* branch link */
4870 uint32_t ilink1; /* interrupt link */
4871 uint32_t ilink2; /* interrupt link */
4872 uint32_t data1; /* error-specific data */
4873 uint32_t data2; /* error-specific data */
4874 uint32_t data3; /* error-specific data */
4875 uint32_t bcon_time; /* beacon timer */
4876 uint32_t tsf_low; /* network timestamp function timer */
4877 uint32_t tsf_hi; /* network timestamp function timer */
4878 uint32_t gp1; /* GP1 timer register */
4879 uint32_t gp2; /* GP2 timer register */
4880 uint32_t fw_rev_type; /* firmware revision type */
4881 uint32_t major; /* uCode version major */
4882 uint32_t minor; /* uCode version minor */
4883 uint32_t hw_ver; /* HW Silicon version */
4884 uint32_t brd_ver; /* HW board version */
4885 uint32_t log_pc; /* log program counter */
4886 uint32_t frame_ptr; /* frame pointer */
4887 uint32_t stack_ptr; /* stack pointer */
4888 uint32_t hcmd; /* last host command header */
4889 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
4890 * rxtx_flag */
4891 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
4892 * host_flag */
4893 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
4894 * enc_flag */
4895 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
4896 * time_flag */
4897 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
4898 * wico interrupt */
4899 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
4900 uint32_t wait_event; /* wait event() caller address */
4901 uint32_t l2p_control; /* L2pControlField */
4902 uint32_t l2p_duration; /* L2pDurationField */
4903 uint32_t l2p_mhvalid; /* L2pMhValidBits */
4904 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
4905 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
4906 * (LMPM_PMG_SEL) */
4907	uint32_t u_timestamp;	/* the date and time of
4908				 * the compilation */
4909 uint32_t flow_handler; /* FH read/write pointers, RX credit */
4910} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4911
4912/*
4913 * UMAC error struct - relevant starting from family 8000 chip.
4914 * Note: This structure is read from the device with IO accesses,
4915 * and the reading already does the endian conversion. As it is
4916 * read with u32-sized accesses, any members with a different size
4917 * need to be ordered correctly though!
4918 */
4919struct iwm_umac_error_event_table {
4920 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4921 uint32_t error_id; /* type of error */
4922 uint32_t blink1; /* branch link */
4923 uint32_t blink2; /* branch link */
4924 uint32_t ilink1; /* interrupt link */
4925 uint32_t ilink2; /* interrupt link */
4926 uint32_t data1; /* error-specific data */
4927 uint32_t data2; /* error-specific data */
4928 uint32_t data3; /* error-specific data */
4929 uint32_t umac_major;
4930 uint32_t umac_minor;
4931	uint32_t frame_pointer;	/* core register 27 */
4932 uint32_t stack_pointer; /* core register 28 */
4933 uint32_t cmd_header; /* latest host cmd sent to UMAC */
4934 uint32_t nic_isr_pref; /* ISR status register */
4935} __packed;
4936
4937#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
4938#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
4939
4940#ifdef IWM_DEBUG
4941struct {
4942 const char *name;
4943 uint8_t num;
4944} advanced_lookup[] = {
4945 { "NMI_INTERRUPT_WDG", 0x34 },
4946 { "SYSASSERT", 0x35 },
4947 { "UCODE_VERSION_MISMATCH", 0x37 },
4948 { "BAD_COMMAND", 0x38 },
4949 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4950 { "FATAL_ERROR", 0x3D },
4951 { "NMI_TRM_HW_ERR", 0x46 },
4952 { "NMI_INTERRUPT_TRM", 0x4C },
4953 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4954 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4955 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4956 { "NMI_INTERRUPT_HOST", 0x66 },
4957 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4958 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4959 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4960 { "ADVANCED_SYSASSERT", 0 },
4961};
4962
4963static const char *
4964iwm_desc_lookup(uint32_t num)
4965{
4966 int i;
4967
4968 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4969 if (advanced_lookup[i].num == num)
4970 return advanced_lookup[i].name;
4971
4972 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4973 return advanced_lookup[i].name;
4974}
4975
4976static void
4977iwm_nic_umac_error(struct iwm_softc *sc)
4978{
4979 struct iwm_umac_error_event_table table;
4980 uint32_t base;
4981
4982 base = sc->sc_uc.uc_umac_error_event_table;
4983
4984 if (base < 0x800000) {
4985 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4986 base);
4987 return;
4988 }
4989
4990 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4991 device_printf(sc->sc_dev, "reading errlog failed\n");
4992 return;
4993 }
4994
4995 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4996 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4997 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4998 sc->sc_flags, table.valid);
4999 }
5000
5001 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5002 iwm_desc_lookup(table.error_id));
5003 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5004 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5005 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5006 table.ilink1);
5007 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5008 table.ilink2);
5009 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5010 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5011 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5012 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5013 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5014 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5015 table.frame_pointer);
5016 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5017 table.stack_pointer);
5018 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5019 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5020 table.nic_isr_pref);
5021}
5022
5023/*
5024 * Support for dumping the error log seemed like a good idea ...
5025 * but it's mostly hex junk and the only sensible thing is the
5026 * hw/ucode revision (which we know anyway). Since it's here,
5027 * I'll just leave it in, just in case e.g. the Intel guys want to
5028 * help us decipher some "ADVANCED_SYSASSERT" later.
5029 */
5030static void
5031iwm_nic_error(struct iwm_softc *sc)
5032{
5033 struct iwm_error_event_table table;
5034 uint32_t base;
5035
5036 device_printf(sc->sc_dev, "dumping device error log\n");
5037 base = sc->sc_uc.uc_error_event_table;
5038 if (base < 0x800000) {
5039 device_printf(sc->sc_dev,
5040 "Invalid error log pointer 0x%08x\n", base);
5041 return;
5042 }
5043
5044 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5045 device_printf(sc->sc_dev, "reading errlog failed\n");
5046 return;
5047 }
5048
5049 if (!table.valid) {
5050 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5051 return;
5052 }
5053
5054 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5055 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5056 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5057 sc->sc_flags, table.valid);
5058 }
5059
5060 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5061 iwm_desc_lookup(table.error_id));
5062 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5063 table.trm_hw_status0);
5064 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5065 table.trm_hw_status1);
5066 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5067 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5068 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5069 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5070 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5071 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5072 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5073 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5074 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5075 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5076 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5077 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5078 table.fw_rev_type);
5079 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5080 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5081 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5082 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5083 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5084 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5085 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5086 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5087 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5088 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5089 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5090 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5091 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5092 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5093 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5094 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5095 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5096 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5097 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5098
5099 if (sc->sc_uc.uc_umac_error_event_table)
5100 iwm_nic_umac_error(sc);
5101}
5102#endif
5103
5104#define SYNC_RESP_STRUCT(_var_, _pkt_) \
5105do { \
5106 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5107 _var_ = (void *)((_pkt_)+1); \
5108} while (/*CONSTCOND*/0)
5109
5110#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
5111do { \
5112 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5113 _ptr_ = (void *)((_pkt_)+1); \
5114} while (/*CONSTCOND*/0)
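
/*
 * A note on the (_pkt_)+1 arithmetic above: adding 1 to a struct
 * iwm_rx_packet pointer advances it by sizeof(struct iwm_rx_packet),
 * so the macros hand back a pointer to the response payload that
 * follows the packet header in the same DMA buffer. (The _len_
 * argument of SYNC_RESP_PTR is currently unused.)
 */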
5115
5116#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
5117
5118/*
5119 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5120 * Basic structure from if_iwn
5121 */
5122static void
5123iwm_notif_intr(struct iwm_softc *sc)
5124{
5125 uint16_t hw;
5126
5127 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5128 BUS_DMASYNC_POSTREAD);
5129
5130 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5131
5132 /*
5133 * Process responses
5134 */
5135 while (sc->rxq.cur != hw) {
5136 struct iwm_rx_ring *ring = &sc->rxq;
5137 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5138 struct iwm_rx_packet *pkt;
5139 struct iwm_cmd_response *cresp;
5140 int qid, idx, code;
5141
5142 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5143 BUS_DMASYNC_POSTREAD);
5144 pkt = mtod(data->m, struct iwm_rx_packet *);
5145
5146 qid = pkt->hdr.qid & ~0x80;
5147 idx = pkt->hdr.idx;
5148
5149 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5150 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5151 "rx packet qid=%d idx=%d type=%x %d %d\n",
5152 pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5153
5154 /*
5155		 * We randomly get these from the firmware, no idea why.
5156		 * They at least seem harmless, so just ignore them for now.
5157 */
5158 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5159 || pkt->len_n_flags == htole32(0x55550000))) {
5160 ADVANCE_RXQ(sc);
5161 continue;
5162 }
5163
5164 switch (code) {
5165 case IWM_REPLY_RX_PHY_CMD:
5166 iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5167 break;
5168
5169 case IWM_REPLY_RX_MPDU_CMD:
5170 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5171 break;
5172
5173 case IWM_TX_CMD:
5174 iwm_mvm_rx_tx_cmd(sc, pkt, data);
5175 break;
5176
5177 case IWM_MISSED_BEACONS_NOTIFICATION: {
5178 struct iwm_missed_beacons_notif *resp;
5179 int missed;
5180
5181 /* XXX look at mac_id to determine interface ID */
5182 struct ieee80211com *ic = &sc->sc_ic;
5183 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5184
5185 SYNC_RESP_STRUCT(resp, pkt);
5186 missed = le32toh(resp->consec_missed_beacons);
5187
5188 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5189 "%s: MISSED_BEACON: mac_id=%d, "
5190 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5191 "num_rx=%d\n",
5192 __func__,
5193 le32toh(resp->mac_id),
5194 le32toh(resp->consec_missed_beacons_since_last_rx),
5195 le32toh(resp->consec_missed_beacons),
5196 le32toh(resp->num_expected_beacons),
5197 le32toh(resp->num_recvd_beacons));
5198
5199 /* Be paranoid */
5200 if (vap == NULL)
5201 break;
5202
5203 /* XXX no net80211 locking? */
5204 if (vap->iv_state == IEEE80211_S_RUN &&
5205 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5206 if (missed > vap->iv_bmissthreshold) {
5207 /* XXX bad locking; turn into task */
5208 IWM_UNLOCK(sc);
5209 ieee80211_beacon_miss(ic);
5210 IWM_LOCK(sc);
5211 }
5212 }
5213
5214 break; }
5215
5216 case IWM_MFUART_LOAD_NOTIFICATION:
5217 break;
5218
5219 case IWM_MVM_ALIVE: {
5220 struct iwm_mvm_alive_resp_v1 *resp1;
5221 struct iwm_mvm_alive_resp_v2 *resp2;
5222 struct iwm_mvm_alive_resp_v3 *resp3;
5223
5224 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5225 SYNC_RESP_STRUCT(resp1, pkt);
5226 sc->sc_uc.uc_error_event_table
5227 = le32toh(resp1->error_event_table_ptr);
5228 sc->sc_uc.uc_log_event_table
5229 = le32toh(resp1->log_event_table_ptr);
5230 sc->sched_base = le32toh(resp1->scd_base_ptr);
5231 if (resp1->status == IWM_ALIVE_STATUS_OK)
5232 sc->sc_uc.uc_ok = 1;
5233 else
5234 sc->sc_uc.uc_ok = 0;
5235 }
5236
5237 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5238 SYNC_RESP_STRUCT(resp2, pkt);
5239 sc->sc_uc.uc_error_event_table
5240 = le32toh(resp2->error_event_table_ptr);
5241 sc->sc_uc.uc_log_event_table
5242 = le32toh(resp2->log_event_table_ptr);
5243 sc->sched_base = le32toh(resp2->scd_base_ptr);
5244 sc->sc_uc.uc_umac_error_event_table
5245 = le32toh(resp2->error_info_addr);
5246 if (resp2->status == IWM_ALIVE_STATUS_OK)
5247 sc->sc_uc.uc_ok = 1;
5248 else
5249 sc->sc_uc.uc_ok = 0;
5250 }
5251
5252 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5253 SYNC_RESP_STRUCT(resp3, pkt);
5254 sc->sc_uc.uc_error_event_table
5255 = le32toh(resp3->error_event_table_ptr);
5256 sc->sc_uc.uc_log_event_table
5257 = le32toh(resp3->log_event_table_ptr);
5258 sc->sched_base = le32toh(resp3->scd_base_ptr);
5259 sc->sc_uc.uc_umac_error_event_table
5260 = le32toh(resp3->error_info_addr);
5261 if (resp3->status == IWM_ALIVE_STATUS_OK)
5262 sc->sc_uc.uc_ok = 1;
5263 else
5264 sc->sc_uc.uc_ok = 0;
5265 }
5266
5267 sc->sc_uc.uc_intr = 1;
5268 wakeup(&sc->sc_uc);
5269 break; }
5270
5271 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5272 struct iwm_calib_res_notif_phy_db *phy_db_notif;
5273 SYNC_RESP_STRUCT(phy_db_notif, pkt);
5274
5275 iwm_phy_db_set_section(sc->sc_phy_db, phy_db_notif);
5276
5277 break; }
5278
5279 case IWM_STATISTICS_NOTIFICATION: {
5280 struct iwm_notif_statistics *stats;
5281 SYNC_RESP_STRUCT(stats, pkt);
5282 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5283 sc->sc_noise = iwm_get_noise(&stats->rx.general);
5284 break; }
5285
5286 case IWM_NVM_ACCESS_CMD:
5287 case IWM_MCC_UPDATE_CMD:
5288 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5289 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5290 BUS_DMASYNC_POSTREAD);
5291 memcpy(sc->sc_cmd_resp,
5292 pkt, sizeof(sc->sc_cmd_resp));
5293 }
5294 break;
5295
5296 case IWM_MCC_CHUB_UPDATE_CMD: {
5297 struct iwm_mcc_chub_notif *notif;
5298 SYNC_RESP_STRUCT(notif, pkt);
5299
5300 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5301 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5302 sc->sc_fw_mcc[2] = '\0';
5303 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5304 "fw source %d sent CC '%s'\n",
5305 notif->source_id, sc->sc_fw_mcc);
5306 break; }
5307
5308 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5309 break;
5310
5311 case IWM_PHY_CONFIGURATION_CMD:
5312 case IWM_TX_ANT_CONFIGURATION_CMD:
5313 case IWM_ADD_STA:
5314 case IWM_MAC_CONTEXT_CMD:
5315 case IWM_REPLY_SF_CFG_CMD:
5316 case IWM_POWER_TABLE_CMD:
5317 case IWM_PHY_CONTEXT_CMD:
5318 case IWM_BINDING_CONTEXT_CMD:
5319 case IWM_TIME_EVENT_CMD:
5320 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5321 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5322 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5323 case IWM_REPLY_BEACON_FILTERING_CMD:
5324 case IWM_MAC_PM_POWER_TABLE:
5325 case IWM_TIME_QUOTA_CMD:
5326 case IWM_REMOVE_STA:
5327 case IWM_TXPATH_FLUSH:
5328 case IWM_LQ_CMD:
5329 case IWM_BT_CONFIG:
5330 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5331 SYNC_RESP_STRUCT(cresp, pkt);
5332 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5333 memcpy(sc->sc_cmd_resp,
5334 pkt, sizeof(*pkt)+sizeof(*cresp));
5335 }
5336 break;
5337
5338 /* ignore */
5339 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5340 break;
5341
5342 case IWM_INIT_COMPLETE_NOTIF:
5343 sc->sc_init_complete = 1;
5344 wakeup(&sc->sc_init_complete);
5345 break;
5346
5347 case IWM_SCAN_OFFLOAD_COMPLETE: {
5348 struct iwm_periodic_scan_complete *notif;
5349 SYNC_RESP_STRUCT(notif, pkt);
5350
5351 break; }
5352
5353 case IWM_SCAN_ITERATION_COMPLETE: {
5354 struct iwm_lmac_scan_complete_notif *notif;
5355 SYNC_RESP_STRUCT(notif, pkt);
5356 taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5357 break; }
5358
5359 case IWM_SCAN_COMPLETE_UMAC: {
5360 struct iwm_umac_scan_complete *notif;
5361 SYNC_RESP_STRUCT(notif, pkt);
5362
5363 IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5364 "UMAC scan complete, status=0x%x\n",
5365 notif->status);
5366#if 0 /* XXX This would be a duplicate scan end call */
5367 taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5368#endif
5369 break;
5370 }
5371
5372 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5373 struct iwm_umac_scan_iter_complete_notif *notif;
5374 SYNC_RESP_STRUCT(notif, pkt);
5375
5376 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5377 "complete, status=0x%x, %d channels scanned\n",
5378 notif->status, notif->scanned_channels);
5379 taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5380 break;
5381 }
5382
5383 case IWM_REPLY_ERROR: {
5384 struct iwm_error_resp *resp;
5385 SYNC_RESP_STRUCT(resp, pkt);
5386
5387 device_printf(sc->sc_dev,
5388 "firmware error 0x%x, cmd 0x%x\n",
5389 le32toh(resp->error_type),
5390 resp->cmd_id);
5391 break; }
5392
5393 case IWM_TIME_EVENT_NOTIFICATION: {
5394 struct iwm_time_event_notif *notif;
5395 SYNC_RESP_STRUCT(notif, pkt);
5396
5397 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5398 "TE notif status = 0x%x action = 0x%x\n",
5399 notif->status, notif->action);
5400 break; }
5401
5402 case IWM_MCAST_FILTER_CMD:
5403 break;
5404
5405 case IWM_SCD_QUEUE_CFG: {
5406 struct iwm_scd_txq_cfg_rsp *rsp;
5407 SYNC_RESP_STRUCT(rsp, pkt);
5408
5409 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5410 "queue cfg token=0x%x sta_id=%d "
5411 "tid=%d scd_queue=%d\n",
5412 rsp->token, rsp->sta_id, rsp->tid,
5413 rsp->scd_queue);
5414 break;
5415 }
5416
5417 default:
5418 device_printf(sc->sc_dev,
5419 "frame %d/%d %x UNHANDLED (this should "
5420 "not happen)\n", qid, idx,
5421 pkt->len_n_flags);
5422 break;
5423 }
5424
5425 /*
5426 * Why test bit 0x80? The Linux driver:
5427 *
5428 * There is one exception: uCode sets bit 15 when it
5429 * originates the response/notification, i.e. when the
5430 * response/notification is not a direct response to a
5431 * command sent by the driver. For example, uCode issues
5432 * IWM_REPLY_RX when it sends a received frame to the driver;
5433 * it is not a direct response to any driver command.
5434 *
5435 * Ok, so since when is 7 == 15? Well, the Linux driver
5436 * uses a slightly different format for pkt->hdr, and "qid"
5437 * is actually the upper byte of a two-byte field.
5438 */
5439 if (!(pkt->hdr.qid & (1 << 7))) {
5440 iwm_cmd_done(sc, pkt);
5441 }
5442
5443 ADVANCE_RXQ(sc);
5444 }
5445
5446 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5447 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5448
5449 /*
5450 * Tell the firmware what we have processed.
5451 * Seems like the hardware gets upset unless we align
5452 * the write by 8??
5453 */
5454 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5455 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5456}
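
/*
 * Worked example of the write-pointer update above (a sketch): with
 * hw == 83 the driver writes (83 - 1) & ~7 == 80, i.e. the pointer is
 * rounded down to a multiple of 8 before being handed to the hardware.
 */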
5457
5458static void
5459iwm_intr(void *arg)
5460{
5461 struct iwm_softc *sc = arg;
5462 int handled = 0;
5463 int r1, r2, rv = 0;
5464 int isperiodic = 0;
5465
5466#if defined(__DragonFly__)
5467 if (sc->sc_mem == NULL) {
5468 kprintf("iwm_intr: detached\n");
5469 return;
5470 }
5471#endif
5472 IWM_LOCK(sc);
5473 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5474
5475 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5476 uint32_t *ict = sc->ict_dma.vaddr;
5477 int tmp;
5478
5479 tmp = htole32(ict[sc->ict_cur]);
5480 if (!tmp)
5481 goto out_ena;
5482
5483 /*
5484		 * Ok, there was something; keep plowing until we have it all.
5485 */
5486 r1 = r2 = 0;
5487 while (tmp) {
5488 r1 |= tmp;
5489 ict[sc->ict_cur] = 0;
5490 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5491 tmp = htole32(ict[sc->ict_cur]);
5492 }
5493
5494 /* this is where the fun begins. don't ask */
5495 if (r1 == 0xffffffff)
5496 r1 = 0;
5497
5498		/* I am not expected to understand this */
5499 if (r1 & 0xc0000)
5500 r1 |= 0x8000;
5501 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5502 } else {
5503 r1 = IWM_READ(sc, IWM_CSR_INT);
5504 /* "hardware gone" (where, fishing?) */
5505 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5506 goto out;
5507 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5508 }
5509 if (r1 == 0 && r2 == 0) {
5510 goto out_ena;
5511 }
5512
5513 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5514
5515 /* ignored */
5516 handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5517
5518 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5519 int i;
5520 struct ieee80211com *ic = &sc->sc_ic;
5521 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5522
5523#ifdef IWM_DEBUG
5524 iwm_nic_error(sc);
5525#endif
5526 /* Dump driver status (TX and RX rings) while we're here. */
5527 device_printf(sc->sc_dev, "driver status:\n");
5528 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5529 struct iwm_tx_ring *ring = &sc->txq[i];
5530 device_printf(sc->sc_dev,
5531 " tx ring %2d: qid=%-2d cur=%-3d "
5532 "queued=%-3d\n",
5533 i, ring->qid, ring->cur, ring->queued);
5534 }
5535 device_printf(sc->sc_dev,
5536 " rx ring: cur=%d\n", sc->rxq.cur);
5537 device_printf(sc->sc_dev,
5538 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5539
5540 /* Don't stop the device; just do a VAP restart */
5541 IWM_UNLOCK(sc);
5542
5543 if (vap == NULL) {
5544 kprintf("%s: null vap\n", __func__);
5545 return;
5546 }
5547
5548 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5549 "restarting\n", __func__, vap->iv_state);
5550
5551 /* XXX TODO: turn this into a callout/taskqueue */
5552 ieee80211_restart_all(ic);
5553 return;
5554 }
5555
5556 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5557 handled |= IWM_CSR_INT_BIT_HW_ERR;
5558 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5559 iwm_stop(sc);
5560 rv = 1;
5561 goto out;
5562 }
5563
5564 /* firmware chunk loaded */
5565 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5566 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5567 handled |= IWM_CSR_INT_BIT_FH_TX;
5568 sc->sc_fw_chunk_done = 1;
5569 wakeup(&sc->sc_fw);
5570 }
5571
5572 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5573 handled |= IWM_CSR_INT_BIT_RF_KILL;
5574 if (iwm_check_rfkill(sc)) {
5575 device_printf(sc->sc_dev,
5576 "%s: rfkill switch, disabling interface\n",
5577 __func__);
5578 iwm_stop(sc);
5579 }
5580 }
5581
5582 	/*
5583 	 * Linux re-arms the periodic interrupt as a one-shot after real RX,
5584 	 * to catch any dangling RX the ICT may have missed.  We cargo-cult it.
5585 	 */
5586 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5587 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5588 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5589 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5590 IWM_WRITE_1(sc,
5591 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5592 isperiodic = 1;
5593 }
5594
5595 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5596 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5597 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5598
5599 iwm_notif_intr(sc);
5600
5601 /* enable periodic interrupt, see above */
5602 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5603 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5604 IWM_CSR_INT_PERIODIC_ENA);
5605 }
5606
5607 if (__predict_false(r1 & ~handled))
5608 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5609 "%s: unhandled interrupts: %x\n", __func__, r1);
5610 rv = 1;
5611
5612 out_ena:
5613 iwm_restore_interrupts(sc);
5614 out:
5615 IWM_UNLOCK(sc);
5616 return;
5617}
5618
5619/*
5620 * Autoconf glue-sniffing
5621 */
5622#define PCI_VENDOR_INTEL 0x8086
5623#define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5624#define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5625#define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5626#define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5627#define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5628#define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5629#define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5630#define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5631#define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5632#define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5633
5634static const struct iwm_devices {
5635 uint16_t device;
5636 const char *name;
5637} iwm_devices[] = {
5638 { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5639 { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5640 { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5641 { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5642 { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5643 { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5644 { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5645 { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5646 { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5647 { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5648};
5649
5650static int
5651iwm_probe(device_t dev)
5652{
5653 int i;
5654
5655 for (i = 0; i < nitems(iwm_devices); i++) {
5656 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5657 pci_get_device(dev) == iwm_devices[i].device) {
5658 device_set_desc(dev, iwm_devices[i].name);
5659 return (BUS_PROBE_DEFAULT);
5660 }
5661 }
5662
5663 return (ENXIO);
5664}
5665
5666static int
5667iwm_dev_check(device_t dev)
5668{
5669 struct iwm_softc *sc;
5670
5671 sc = device_get_softc(dev);
5672
5673 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5674 switch (pci_get_device(dev)) {
5675 case PCI_PRODUCT_INTEL_WL_3160_1:
5676 case PCI_PRODUCT_INTEL_WL_3160_2:
5677 sc->sc_fwname = "iwm3160fw";
5678 sc->host_interrupt_operation_mode = 1;
5679 sc->eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000;
5680 sc->nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000;
5681 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5682 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5683 return (0);
5684 case PCI_PRODUCT_INTEL_WL_3165_1:
5685 case PCI_PRODUCT_INTEL_WL_3165_2:
5686 sc->sc_fwname = "iwm7265fw";
5687 sc->host_interrupt_operation_mode = 0;
5688 sc->eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000;
5689 sc->nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000;
5690 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5691 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5692 return (0);
5693 case PCI_PRODUCT_INTEL_WL_7260_1:
5694 case PCI_PRODUCT_INTEL_WL_7260_2:
5695 sc->sc_fwname = "iwm7260fw";
5696 sc->host_interrupt_operation_mode = 1;
5697 sc->eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000;
5698 sc->nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000;
5699 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5700 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5701 return (0);
5702 case PCI_PRODUCT_INTEL_WL_7265_1:
5703 case PCI_PRODUCT_INTEL_WL_7265_2:
5704 sc->sc_fwname = "iwm7265fw";
5705 sc->host_interrupt_operation_mode = 0;
5706 sc->eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000;
5707 sc->nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000;
5708 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5709 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5710 return (0);
5711 case PCI_PRODUCT_INTEL_WL_8260_1:
5712 case PCI_PRODUCT_INTEL_WL_8260_2:
5713 sc->sc_fwname = "iwm8000Cfw";
5714 sc->host_interrupt_operation_mode = 0;
5715 sc->eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000;
5716 sc->nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000;
5717 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5718 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5719 return (0);
5720 default:
5721 device_printf(dev, "unknown adapter type\n");
5722 return ENXIO;
5723 }
5724}
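
/*
 * Linux iwlwifi keeps these per-device parameters in static
 * "struct iwl_cfg" tables instead of a switch.  A minimal sketch of
 * that approach, with a hypothetical iwm_cfg type that is not part
 * of this driver:
 */
#if 0
struct iwm_cfg {
	const char	*fwname;
	int		host_interrupt_operation_mode;
	int		eeprom_size;
	int		nvm_hw_section_num;
	int		device_family;
	int		fwdmasegsz;
};

static const struct iwm_cfg iwm3160_cfg = {
	.fwname				= "iwm3160fw",
	.host_interrupt_operation_mode	= 1,
	.eeprom_size			= IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,
	.nvm_hw_section_num		= IWM_NVM_HW_SECTION_NUM_FAMILY_7000,
	.device_family			= IWM_DEVICE_FAMILY_7000,
	.fwdmasegsz			= IWM_FWDMASEGSZ,
};
#endif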
5725
5726static int
5727iwm_pci_attach(device_t dev)
5728{
5729 struct iwm_softc *sc;
5730 int count, error, rid;
5731 uint16_t reg;
5732#if defined(__DragonFly__)
5733 int irq_flags;
5734#endif
5735
5736 sc = device_get_softc(dev);
5737
5738 /* Clear device-specific "PCI retry timeout" register (41h). */
5739 reg = pci_read_config(dev, 0x40, sizeof(reg));
5740 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5741
5742 /* Enable bus-mastering and hardware bug workaround. */
5743 pci_enable_busmaster(dev);
5744 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5745 /* if !MSI */
5746 if (reg & PCIM_STATUS_INTxSTATE) {
5747 reg &= ~PCIM_STATUS_INTxSTATE;
5748 }
5749 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5750
5751 rid = PCIR_BAR(0);
5752 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5753 RF_ACTIVE);
5754 if (sc->sc_mem == NULL) {
5755 device_printf(sc->sc_dev, "can't map mem space\n");
5756 return (ENXIO);
5757 }
5758 sc->sc_st = rman_get_bustag(sc->sc_mem);
5759 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5760
5761 /* Install interrupt handler. */
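	/*
	 * With MSI the interrupt resource lives at rid 1; rid 0 is the
	 * shareable legacy INTx line (FreeBSD convention; DragonFly's
	 * pci_alloc_1intr() fills in the rid itself).
	 */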
5762 count = 1;
5763 rid = 0;
5764#if defined(__DragonFly__)
5765 pci_alloc_1intr(dev, iwm_msi_enable, &rid, &irq_flags);
5766 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, irq_flags);
5767#else
5768 if (pci_alloc_msi(dev, &count) == 0)
5769 rid = 1;
5770 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5771 (rid != 0 ? 0 : RF_SHAREABLE));
5772#endif
5773 if (sc->sc_irq == NULL) {
5774 device_printf(dev, "can't map interrupt\n");
5775 return (ENXIO);
5776 }
5777#if defined(__DragonFly__)
5778 error = bus_setup_intr(dev, sc->sc_irq, INTR_MPSAFE,
5779 iwm_intr, sc, &sc->sc_ih,
5780 &wlan_global_serializer);
5781#else
5782 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5783 NULL, iwm_intr, sc, &sc->sc_ih);
5784#endif
5785 if (sc->sc_ih == NULL) {
5786 		device_printf(dev, "can't establish interrupt\n");
5787#if defined(__DragonFly__)
5788 pci_release_msi(dev);
5789#endif
5790 return (ENXIO);
5791 }
5792 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5793
5794 return (0);
5795}
5796
5797static void
5798iwm_pci_detach(device_t dev)
5799{
5800 struct iwm_softc *sc = device_get_softc(dev);
5801
5802 if (sc->sc_irq != NULL) {
5803 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5804 bus_release_resource(dev, SYS_RES_IRQ,
5805 rman_get_rid(sc->sc_irq), sc->sc_irq);
5806 pci_release_msi(dev);
5807#if defined(__DragonFly__)
5808 sc->sc_irq = NULL;
5809#endif
5810 }
5811 if (sc->sc_mem != NULL) {
5812 bus_release_resource(dev, SYS_RES_MEMORY,
5813 rman_get_rid(sc->sc_mem), sc->sc_mem);
5814#if defined(__DragonFly__)
5815 sc->sc_mem = NULL;
5816#endif
5817 }
5818}
5819
5822static int
5823iwm_attach(device_t dev)
5824{
5825 struct iwm_softc *sc = device_get_softc(dev);
5826 struct ieee80211com *ic = &sc->sc_ic;
5827 int error;
5828 int txq_i, i;
5829
5830 sc->sc_dev = dev;
5831 sc->sc_attached = 1;
5832 IWM_LOCK_INIT(sc);
5833 mbufq_init(&sc->sc_snd, ifqmaxlen);
5834#if defined(__DragonFly__)
5835 callout_init_lk(&sc->sc_watchdog_to, &sc->sc_lk);
5836#else
5837 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5838#endif
5839 callout_init(&sc->sc_led_blink_to);
5840 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5841 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5842 taskqueue_thread_enqueue, &sc->sc_tq);
5843#if defined(__DragonFly__)
5844 error = taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON,
5845 -1, "iwm_taskq");
5846#else
5847 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5848#endif
5849 if (error != 0) {
5850 device_printf(dev, "can't start threads, error %d\n",
5851 error);
5852 goto fail;
5853 }
5854
5855 /* Init phy db */
5856 sc->sc_phy_db = iwm_phy_db_init(sc);
5857 if (!sc->sc_phy_db) {
5858 device_printf(dev, "Cannot init phy_db\n");
5859 goto fail;
5860 }
5861
5862 /* PCI attach */
5863 error = iwm_pci_attach(dev);
5864 if (error != 0)
5865 goto fail;
5866
5867 sc->sc_wantresp = -1;
5868
5869 /* Check device type */
5870 error = iwm_dev_check(dev);
5871 if (error != 0)
5872 goto fail;
5873
5874 /*
5875 * We now start fiddling with the hardware
5876 */
5877 /*
5878 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5879 	 * changed: the revision step now also includes bits 0-1 (there is no
5880 	 * more "dash" value). To keep hw_rev backwards compatible, we store
5881 	 * it in the old format.
5882 */
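	/*
	 * IWM_CSR_HW_REV_STEP() extracts bits 2-3, so feeding it the value
	 * shifted left by 2 pulls the new step out of bits 0-1, and the
	 * final << 2 places the step where the old format kept it.
	 */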
5883 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5884 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5885 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5886
5887 if (iwm_prepare_card_hw(sc) != 0) {
5888 device_printf(dev, "could not initialize hardware\n");
5889 goto fail;
5890 }
5891
5892 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5893 int ret;
5894 uint32_t hw_step;
5895
5896 /*
5897 		 * To recognize a C step, read the chip version id located
5898 		 * at the AUX bus MISC address.
5899 */
5900 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5901 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5902 DELAY(2);
5903
5904 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5905 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5906 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5907 25000);
5908 if (!ret) {
5909 device_printf(sc->sc_dev,
5910 "Failed to wake up the nic\n");
5911 goto fail;
5912 }
5913
5914 if (iwm_nic_lock(sc)) {
5915 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5916 hw_step |= IWM_ENABLE_WFPM;
5917 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5918 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5919 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5920 if (hw_step == 0x3)
5921 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5922 (IWM_SILICON_C_STEP << 2);
5923 iwm_nic_unlock(sc);
5924 } else {
5925 device_printf(sc->sc_dev, "Failed to lock the nic\n");
5926 goto fail;
5927 }
5928 }
5929
5930 /* Allocate DMA memory for firmware transfers. */
5931 if ((error = iwm_alloc_fwmem(sc)) != 0) {
5932 device_printf(dev, "could not allocate memory for firmware\n");
5933 goto fail;
5934 }
5935
5936 /* Allocate "Keep Warm" page. */
5937 if ((error = iwm_alloc_kw(sc)) != 0) {
5938 device_printf(dev, "could not allocate keep warm page\n");
5939 goto fail;
5940 }
5941
5942 /* We use ICT interrupts */
5943 if ((error = iwm_alloc_ict(sc)) != 0) {
5944 device_printf(dev, "could not allocate ICT table\n");
5945 goto fail;
5946 }
5947
5948 /* Allocate TX scheduler "rings". */
5949 if ((error = iwm_alloc_sched(sc)) != 0) {
5950 device_printf(dev, "could not allocate TX scheduler rings\n");
5951 goto fail;
5952 }
5953
5954 /* Allocate TX rings */
5955 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5956 if ((error = iwm_alloc_tx_ring(sc,
5957 &sc->txq[txq_i], txq_i)) != 0) {
5958 device_printf(dev,
5959 "could not allocate TX ring %d\n",
5960 txq_i);
5961 goto fail;
5962 }
5963 }
5964
5965 /* Allocate RX ring. */
5966 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5967 device_printf(dev, "could not allocate RX ring\n");
5968 goto fail;
5969 }
5970
5971 /* Clear pending interrupts. */
5972 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5973
5974 ic->ic_softc = sc;
5975 ic->ic_name = device_get_nameunit(sc->sc_dev);
5976 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
5977 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
5978
5979 /* Set device capabilities. */
5980 ic->ic_caps =
5981 IEEE80211_C_STA |
5982 IEEE80211_C_WPA | /* WPA/RSN */
5983 IEEE80211_C_WME |
5984 IEEE80211_C_SHSLOT | /* short slot time supported */
5985 IEEE80211_C_SHPREAMBLE /* short preamble supported */
5986// IEEE80211_C_BGSCAN /* capable of bg scanning */
5987 ;
5988 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5989 sc->sc_phyctxt[i].id = i;
5990 sc->sc_phyctxt[i].color = 0;
5991 sc->sc_phyctxt[i].ref = 0;
5992 sc->sc_phyctxt[i].channel = NULL;
5993 }
5994
5995 /* Max RSSI */
5996 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
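
	/*
	 * Defer firmware and NVM setup to a config intrhook: iwm_preinit()
	 * needs working interrupts to load firmware, and those are not
	 * available this early in autoconfiguration.
	 */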
5997 sc->sc_preinit_hook.ich_func = iwm_preinit;
5998 sc->sc_preinit_hook.ich_arg = sc;
5999 sc->sc_preinit_hook.ich_desc = "iwm";
6000 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6001 device_printf(dev, "config_intrhook_establish failed\n");
6002 goto fail;
6003 }
6004
6005#ifdef IWM_DEBUG
6006 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6007 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6008 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6009#endif
6010
6011 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6012 "<-%s\n", __func__);
6013
6014 return 0;
6015
6016 /* Free allocated memory if something failed during attachment. */
6017fail:
6018 iwm_detach_local(sc, 0);
6019
6020 return ENXIO;
6021}
6022
6023static int
6024iwm_is_valid_ether_addr(uint8_t *addr)
6025{
6026 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6027
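	/* Reject group (multicast/broadcast) addresses and all-zeroes. */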
6028 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6029 return (FALSE);
6030
6031 return (TRUE);
6032}
6033
6034static int
6035iwm_update_edca(struct ieee80211com *ic)
6036{
6037 struct iwm_softc *sc = ic->ic_softc;
6038
6039 device_printf(sc->sc_dev, "%s: called\n", __func__);
6040 return (0);
6041}
6042
6043static void
6044iwm_preinit(void *arg)
6045{
6046 struct iwm_softc *sc = arg;
6047 device_t dev = sc->sc_dev;
6048 struct ieee80211com *ic = &sc->sc_ic;
6049 int error;
6050
6051 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6052 "->%s\n", __func__);
6053
6054 IWM_LOCK(sc);
6055 if ((error = iwm_start_hw(sc)) != 0) {
6056 device_printf(dev, "could not initialize hardware\n");
6057 IWM_UNLOCK(sc);
6058 goto fail;
6059 }
6060
6061 error = iwm_run_init_mvm_ucode(sc, 1);
6062 iwm_stop_device(sc);
6063 if (error) {
6064 IWM_UNLOCK(sc);
6065 goto fail;
6066 }
6067 device_printf(dev,
6068 "hw rev 0x%x, fw ver %s, address %s\n",
6069 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6070 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6071
6072 /* not all hardware can do 5GHz band */
6073 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6074 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6075 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6076 IWM_UNLOCK(sc);
6077
6078 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6079 ic->ic_channels);
6080
6081 /*
6082 * At this point we've committed - if we fail to do setup,
6083 * we now also have to tear down the net80211 state.
6084 */
6085 ieee80211_ifattach(ic);
6086 ic->ic_vap_create = iwm_vap_create;
6087 ic->ic_vap_delete = iwm_vap_delete;
6088 ic->ic_raw_xmit = iwm_raw_xmit;
6089 ic->ic_node_alloc = iwm_node_alloc;
6090 ic->ic_scan_start = iwm_scan_start;
6091 ic->ic_scan_end = iwm_scan_end;
6092 ic->ic_update_mcast = iwm_update_mcast;
6093 ic->ic_getradiocaps = iwm_init_channel_map;
6094 ic->ic_set_channel = iwm_set_channel;
6095 ic->ic_scan_curchan = iwm_scan_curchan;
6096 ic->ic_scan_mindwell = iwm_scan_mindwell;
6097 ic->ic_wme.wme_update = iwm_update_edca;
6098 ic->ic_parent = iwm_parent;
6099 ic->ic_transmit = iwm_transmit;
6100 iwm_radiotap_attach(sc);
6101 if (bootverbose)
6102 ieee80211_announce(ic);
6103
6104 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6105 "<-%s\n", __func__);
6106 config_intrhook_disestablish(&sc->sc_preinit_hook);
6107
6108 return;
6109fail:
6110 config_intrhook_disestablish(&sc->sc_preinit_hook);
6111 iwm_detach_local(sc, 0);
6112}
6113
6114/*
6115 * Attach the interface to 802.11 radiotap.
6116 */
6117static void
6118iwm_radiotap_attach(struct iwm_softc *sc)
6119{
6120 struct ieee80211com *ic = &sc->sc_ic;
6121
6122 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6123 "->%s begin\n", __func__);
6124 ieee80211_radiotap_attach(ic,
6125 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6126 IWM_TX_RADIOTAP_PRESENT,
6127 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6128 IWM_RX_RADIOTAP_PRESENT);
6129 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6130 "->%s end\n", __func__);
6131}
6132
6133static struct ieee80211vap *
6134iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6135 enum ieee80211_opmode opmode, int flags,
6136 const uint8_t bssid[IEEE80211_ADDR_LEN],
6137 const uint8_t mac[IEEE80211_ADDR_LEN])
6138{
6139 struct iwm_vap *ivp;
6140 struct ieee80211vap *vap;
6141
6142 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6143 return NULL;
6144 ivp = kmalloc(sizeof(struct iwm_vap), M_80211_VAP, M_INTWAIT | M_ZERO);
6145 vap = &ivp->iv_vap;
6146 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6147 vap->iv_bmissthreshold = 10; /* override default */
6148 /* Override with driver methods. */
6149 ivp->iv_newstate = vap->iv_newstate;
6150 vap->iv_newstate = iwm_newstate;
6151
6152 ieee80211_ratectl_init(vap);
6153 /* Complete setup. */
6154 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6155 mac);
6156 ic->ic_opmode = opmode;
6157
6158 return vap;
6159}
6160
6161static void
6162iwm_vap_delete(struct ieee80211vap *vap)
6163{
6164 struct iwm_vap *ivp = IWM_VAP(vap);
6165
6166 ieee80211_ratectl_deinit(vap);
6167 ieee80211_vap_detach(vap);
6168 kfree(ivp, M_80211_VAP);
6169}
6170
6171static void
6172iwm_scan_start(struct ieee80211com *ic)
6173{
6174 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6175 struct iwm_softc *sc = ic->ic_softc;
6176 int error;
6177
6178 IWM_LOCK(sc);
6179 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6180 error = iwm_mvm_umac_scan(sc);
6181 else
6182 error = iwm_mvm_lmac_scan(sc);
6183 if (error != 0) {
6184 		device_printf(sc->sc_dev, "could not initiate scan\n");
6185 IWM_UNLOCK(sc);
6186 ieee80211_cancel_scan(vap);
6187 } else {
6188 iwm_led_blink_start(sc);
6189 IWM_UNLOCK(sc);
6190 }
6191}
6192
6193static void
6194iwm_scan_end(struct ieee80211com *ic)
6195{
6196 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6197 struct iwm_softc *sc = ic->ic_softc;
6198
6199 IWM_LOCK(sc);
6200 iwm_led_blink_stop(sc);
6201 if (vap->iv_state == IEEE80211_S_RUN)
6202 iwm_mvm_led_enable(sc);
6203 IWM_UNLOCK(sc);
6204}
6205
6206static void
6207iwm_update_mcast(struct ieee80211com *ic)
6208{
6209}
6210
6211static void
6212iwm_set_channel(struct ieee80211com *ic)
6213{
6214}
6215
6216static void
6217iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6218{
6219}
6220
6221static void
6222iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6223{
6224 return;
6225}
6226
6227void
6228iwm_init_task(void *arg1)
6229{
6230 struct iwm_softc *sc = arg1;
6231
6232 IWM_LOCK(sc);
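	/* Serialize: wait out any other thread holding IWM_FLAG_BUSY. */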
6233 while (sc->sc_flags & IWM_FLAG_BUSY) {
6234#if defined(__DragonFly__)
6235 lksleep(&sc->sc_flags, &sc->sc_lk, 0, "iwmpwr", 0);
6236#else
6237 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6238#endif
6239 	}
6240 sc->sc_flags |= IWM_FLAG_BUSY;
6241 iwm_stop(sc);
6242 if (sc->sc_ic.ic_nrunning > 0)
6243 iwm_init(sc);
6244 sc->sc_flags &= ~IWM_FLAG_BUSY;
6245 wakeup(&sc->sc_flags);
6246 IWM_UNLOCK(sc);
6247}
6248
6249static int
6250iwm_resume(device_t dev)
6251{
6252 struct iwm_softc *sc = device_get_softc(dev);
6253 int do_reinit = 0;
6254 uint16_t reg;
6255
6256 /* Clear device-specific "PCI retry timeout" register (41h). */
6257 reg = pci_read_config(dev, 0x40, sizeof(reg));
6258 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6259 iwm_init_task(device_get_softc(dev));
6260
6261 IWM_LOCK(sc);
6262 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6263 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6264 do_reinit = 1;
6265 }
6266 IWM_UNLOCK(sc);
6267
6268 if (do_reinit)
6269 ieee80211_resume_all(&sc->sc_ic);
6270
6271 return 0;
6272}
6273
6274static int
6275iwm_suspend(device_t dev)
6276{
6277 int do_stop = 0;
6278 struct iwm_softc *sc = device_get_softc(dev);
6279
6280 	do_stop = (sc->sc_ic.ic_nrunning > 0);
6281
6282 ieee80211_suspend_all(&sc->sc_ic);
6283
6284 if (do_stop) {
6285 IWM_LOCK(sc);
6286 iwm_stop(sc);
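		/* Checked and cleared by iwm_resume() to trigger a net80211 restart. */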
6287 sc->sc_flags |= IWM_FLAG_SCANNING;
6288 IWM_UNLOCK(sc);
6289 }
6290
6291 return (0);
6292}
6293
6294static int
6295iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6296{
6297 struct iwm_fw_info *fw = &sc->sc_fw;
6298 device_t dev = sc->sc_dev;
6299 int i;
6300
6301 if (!sc->sc_attached)
6302 return 0;
6303 sc->sc_attached = 0;
6304 if (sc->sc_tq) {
6305#if defined(__DragonFly__)
6306 		/* No taskqueue_drain_all() on DragonFly; taskqueue_free() drains pending tasks. */
6307#else
6308 taskqueue_drain_all(sc->sc_tq);
6309#endif
6310 taskqueue_free(sc->sc_tq);
6311#if defined(__DragonFly__)
6312 sc->sc_tq = NULL;
6313#endif
6314 }
6315 callout_drain(&sc->sc_led_blink_to);
6316 callout_drain(&sc->sc_watchdog_to);
6317 iwm_stop_device(sc);
6318 if (do_net80211) {
6319 ieee80211_ifdetach(&sc->sc_ic);
6320 }
6321
6322 iwm_phy_db_free(sc->sc_phy_db);
6323 sc->sc_phy_db = NULL;
6324
6325 iwm_free_nvm_data(sc->nvm_data);
6326
6327 /* Free descriptor rings */
6328 iwm_free_rx_ring(sc, &sc->rxq);
6329 for (i = 0; i < nitems(sc->txq); i++)
6330 iwm_free_tx_ring(sc, &sc->txq[i]);
6331
6332 /* Free firmware */
6333 if (fw->fw_fp != NULL)
6334 iwm_fw_info_free(fw);
6335
6336 /* Free scheduler */
6337 iwm_dma_contig_free(&sc->sched_dma);
6338 iwm_dma_contig_free(&sc->ict_dma);
6339 iwm_dma_contig_free(&sc->kw_dma);
6340 iwm_dma_contig_free(&sc->fw_dma);
6341
6342 /* Finished with the hardware - detach things */
6343 iwm_pci_detach(dev);
6344
6345 mbufq_drain(&sc->sc_snd);
6346 IWM_LOCK_DESTROY(sc);
6347
6348 return (0);
6349}
6350
6351static int
6352iwm_detach(device_t dev)
6353{
6354 struct iwm_softc *sc = device_get_softc(dev);
6355
6356 return (iwm_detach_local(sc, 1));
6357}
6358
6359static device_method_t iwm_pci_methods[] = {
6360 /* Device interface */
6361 DEVMETHOD(device_probe, iwm_probe),
6362 DEVMETHOD(device_attach, iwm_attach),
6363 DEVMETHOD(device_detach, iwm_detach),
6364 DEVMETHOD(device_suspend, iwm_suspend),
6365 DEVMETHOD(device_resume, iwm_resume),
6366
6367 DEVMETHOD_END
6368};
6369
6370static driver_t iwm_pci_driver = {
6371 "iwm",
6372 iwm_pci_methods,
6373 sizeof (struct iwm_softc)
6374};
6375
6376static devclass_t iwm_devclass;
6377
6378DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6379MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6380MODULE_DEPEND(iwm, pci, 1, 1, 1);
6381MODULE_DEPEND(iwm, wlan, 1, 1, 1);