if_iwm - Sync nvm parsing code with Linux iwlwifi.
sys/dev/netif/iwm/if_iwm.c (dragonfly.git)
/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * DragonFly work
 *
 * NOTE: Relative to roughly August 8th sources, does not include FreeBSD
 *	 changes to remove per-device network interface (DragonFly has not
 *	 caught up to that yet on the WLAN side).
 *
 * Comprehensive list of adjustments for DragonFly not #ifdef'd:
 *	malloc -> kmalloc	(in particular, changing improper M_NOWAIT
 *				 specifications to M_INTWAIT.  We still don't
 *				 understand why FreeBSD uses M_NOWAIT for
 *				 critical must-not-fail kmalloc()s).
 *	free -> kfree
 *	printf -> kprintf
 *	(bug fix) memset in iwm_reset_rx_ring.
 *	(debug) added several kprintf()s on error
 *
 *	header file paths (DFly allows localized path specifications).
 *	minor header file differences.
 *
 * Comprehensive list of adjustments for DragonFly #ifdef'd:
 *	(safety) added register read-back serialization in iwm_reset_rx_ring().
 *	packet counters
 *	msleep -> lksleep
 *	mtx -> lk  (mtx functions -> lockmgr functions)
 *	callout differences
 *	taskqueue differences
 *	MSI differences
 *	bus_setup_intr() differences
 *	minor PCI config register naming differences
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/endian.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#include <netproto/802_11/ieee80211_ratectl.h>
#include <netproto/802_11/ieee80211_radiotap.h>

#include "if_iwmreg.h"
#include "if_iwmvar.h"
#include "if_iwm_debug.h"
#include "if_iwm_util.h"
#include "if_iwm_binding.h"
#include "if_iwm_phy_db.h"
#include "if_iwm_mac_ctxt.h"
#include "if_iwm_phy_ctxt.h"
#include "if_iwm_time_event.h"
#include "if_iwm_power.h"
#include "if_iwm_scan.h"
#include "if_iwm_pcie_trans.h"
#include "if_iwm_led.h"

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
					   enum iwm_ucode_type,
					   const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
#if !defined(__DragonFly__)
static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
#endif
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
				     bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
				  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
				   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
				       const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
					const uint8_t *, uint32_t);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
					   struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
					      enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
				      struct iwm_rx_packet *,
				      struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
				   struct iwm_rx_data *);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
					 struct iwm_rx_packet *,
					 struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
				  struct iwm_rx_data *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
				 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
						struct iwm_mvm_add_sta_cmd_v7 *,
						int *);
static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
				       int);
static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
					   struct iwm_int_sta *,
					   const uint8_t *, uint16_t, uint16_t);
static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
	iwm_node_alloc(struct ieee80211vap *,
		       const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
	iwm_vap_create(struct ieee80211com *,
		       const char [IFNAMSIZ], int,
		       enum ieee80211_opmode, int,
		       const uint8_t [IEEE80211_ADDR_LEN],
		       const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

#if defined(__DragonFly__)
static int	iwm_msi_enable = 1;

TUNABLE_INT("hw.iwm.msi.enable", &iwm_msi_enable);

#endif

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

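/*
 * Store a firmware section for later upload to the device.  The first
 * 32 bits of each section's payload carry the device load offset; the
 * remainder is the section data itself.
 */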
static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

#define IWM_DEFAULT_SCAN_CHANNELS 40

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

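/*
 * Fetch the firmware image via firmware(9) and walk its TLV records,
 * storing ucode sections and capability flags in the softc.  Concurrent
 * callers are serialized through fw->fw_status and wakeup().
 */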
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_TYPE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) {
#if defined(__DragonFly__)
		lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfwp", 0);
#else
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
#endif
	}
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->sc_fwname);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		error = EINVAL;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		error = EINVAL;
		goto out;
	}

	ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(const uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw_phy_config =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			const struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				error = EINVAL;
				goto parse_out;
			}
			api = (const struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n",
				    le32toh(api->api_index));
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			const struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				error = EINVAL;
				goto parse_out;
			}
			capa = (const struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				device_printf(sc->sc_dev,
				    "unsupported API index %d\n", idx);
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			ksnprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		device_printf(sc->sc_dev,
		    "device uses unsupported power ops\n");
		error = ENOTSUP;
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

#if !defined(__DragonFly__)
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
#endif

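/*
 * Allocate a coherent, contiguous DMA region.  On DragonFly this maps
 * onto bus_dmamem_coherent(); elsewhere it is the usual FreeBSD
 * tag-create/alloc/load sequence.
 */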
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

#if defined(__DragonFly__)
	bus_dmamem_t dmem;
	error = bus_dmamem_coherent(tag, alignment, 0,
				    BUS_SPACE_MAXADDR_32BIT,
				    BUS_SPACE_MAXADDR,
				    size, BUS_DMA_NOWAIT, &dmem);
	if (error != 0)
		goto fail;

	dma->tag = dmem.dmem_tag;
	dma->map = dmem.dmem_map;
	dma->vaddr = dmem.dmem_addr;
	dma->paddr = dmem.dmem_busaddr;
#else
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}
#endif

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

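/*
 * Allocate the RX ring: descriptor array, status area, a DMA tag for
 * receive buffers, a spare map used by iwm_rx_addbuf(), and the
 * initial set of mbuf-backed buffers.
 */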
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	/* XXX conditional nic locks are stupid */
	/* XXX print out if we can't lock the NIC? */
	if (iwm_nic_lock(sc)) {
		/* XXX handle if RX stop doesn't finish? */
		(void) iwm_pcie_rx_stop(sc);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

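/*
 * Allocate a TX ring: the descriptor array plus, for rings up to and
 * including the command queue, the command buffer and per-slot DMA
 * maps.  The command queue needs more mapped space per slot.
 */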
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

#if defined(__DragonFly__)
	error = bus_dma_tag_create(sc->sc_dmat, PAGE_SIZE,
				   0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   maxsize, nsegments, maxsize,
				   BUS_DMA_NOWAIT, &ring->data_dmat);
#else
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
#endif
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(1000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwm_disable_interrupts(sc);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

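/*
 * Derive the radio type/step/dash from the firmware PHY configuration
 * and program it, together with the MAC revision, into the hardware
 * interface configuration register.
 */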
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

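/*
 * Initialize the RX DMA engine: point the hardware at the descriptor
 * ring and status area, program the channel configuration and
 * interrupt coalescing, then prime the write pointer.
 */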
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

#if defined(__DragonFly__)
	/* Force serialization (probably not needed but don't trust the HW) */
	IWM_READ(sc, IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG);
#endif

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |	/* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};

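/*
 * Enable a TX queue.  The command queue is configured directly through
 * scheduler PRPH registers; all other queues are configured with an
 * IWM_SCD_QUEUE_CFG firmware command.
 */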
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* Deactivate before configuration. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

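/*
 * Finish bring-up after the firmware has reported ALIVE: verify the
 * scheduler SRAM base, reset the ICT table, clear the scheduler
 * context in SRAM, then enable the command queue and TX DMA channels.
 */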
1656static int
1657iwm_post_alive(struct iwm_softc *sc)
1658{
1659 int nwords;
1660 int error, chnl;
edfc8a07 1661 uint32_t base;
24a8d46a
MD
1662
1663 if (!iwm_nic_lock(sc))
1664 return EBUSY;
1665
edfc8a07
IV
1666 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1667 if (sc->sched_base != base) {
24a8d46a 1668 device_printf(sc->sc_dev,
edfc8a07
IV
1669 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1670 __func__, sc->sched_base, base);
24a8d46a
MD
1671 }
1672
1673 iwm_ict_reset(sc);
1674
1675 /* Clear TX scheduler state in SRAM. */
1676 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1677 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1678 / sizeof(uint32_t);
1679 error = iwm_write_mem(sc,
1680 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1681 NULL, nwords);
1682 if (error)
1683 goto out;
1684
1685 /* Set physical address of TX scheduler rings (1KB aligned). */
1686 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1687
1688 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1689
edfc8a07
IV
1690 iwm_nic_unlock(sc);
1691
24a8d46a 1692 /* enable command channel */
edfc8a07
IV
1693 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1694 if (error)
1695 return error;
1696
1697 if (!iwm_nic_lock(sc))
1698 return EBUSY;
24a8d46a
MD
1699
1700 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1701
1702 /* Enable DMA channels. */
1703 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1704 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1705 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1706 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1707 }
1708
1709 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1710 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1711
1712 /* Enable L1-Active */
1713 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1714 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1715 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1716 }
1717
1718 out:
1719 iwm_nic_unlock(sc);
1720 return error;
1721}
1722
1723/*
1724 * NVM read access and content parsing. We do not support
1725 * external NVM or writing NVM.
1726 * iwlwifi/mvm/nvm.c
1727 */
1728
1729#define IWM_NVM_HW_SECTION_NUM_FAMILY_7000 0
1730#define IWM_NVM_HW_SECTION_NUM_FAMILY_8000 10
1731
1732/* Default NVM size to read */
edfc8a07 1733#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1734
1735#define IWM_NVM_WRITE_OPCODE 1
1736#define IWM_NVM_READ_OPCODE 0
1737
edfc8a07 1738/* load nvm chunk response */
1739enum {
1740 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1741 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1742};
edfc8a07 1743
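/*
 * Read one chunk of an NVM section via IWM_NVM_ACCESS_CMD. The
 * firmware replies with the offset, length and payload of the chunk;
 * the payload is copied to data + offset and *len is set to the
 * number of bytes actually read.
 */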
1744static int
1745iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1746 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1747{
1748 struct iwm_nvm_access_cmd nvm_access_cmd = {
1749 .offset = htole16(offset),
1750 .length = htole16(length),
1751 .type = htole16(section),
1752 .op_code = IWM_NVM_READ_OPCODE,
1753 };
1754 struct iwm_nvm_access_resp *nvm_resp;
1755 struct iwm_rx_packet *pkt;
1756 struct iwm_host_cmd cmd = {
1757 .id = IWM_NVM_ACCESS_CMD,
250a1c33 1758 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1759 .data = { &nvm_access_cmd, },
1760 };
250a1c33 1761 int ret, bytes_read, offset_read;
1762 uint8_t *resp_data;
1763
1764 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1765
1766 ret = iwm_send_cmd(sc, &cmd);
1767 if (ret) {
1768 device_printf(sc->sc_dev,
1769 "Could not send NVM_ACCESS command (error=%d)\n", ret);
24a8d46a 1770 return ret;
edfc8a07 1771 }
1772
1773 pkt = cmd.resp_pkt;
1774
1775 /* Extract NVM response */
1776 nvm_resp = (void *)pkt->data;
1777 ret = le16toh(nvm_resp->status);
1778 bytes_read = le16toh(nvm_resp->length);
1779 offset_read = le16toh(nvm_resp->offset);
1780 resp_data = nvm_resp->data;
1781 if (ret) {
1782 if ((offset != 0) &&
1783 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1784 /*
1785 * meaning of NOT_VALID_ADDRESS:
1786 * the driver tried to read a chunk from an address that is a
1787 * multiple of 2K and got an error because that address is empty.
1788 * meaning of (offset != 0): the driver has already
1789 * read valid data from another chunk, so this case
1790 * is not an error.
1791 */
1792 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1793 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1794 offset);
1795 *len = 0;
1796 ret = 0;
1797 } else {
1798 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1799 "NVM access command failed with status %d\n", ret);
1800 ret = EIO;
1801 }
1802 goto exit;
1803 }
1804
1805 if (offset_read != offset) {
1806 device_printf(sc->sc_dev,
1807 "NVM ACCESS response with invalid offset %d\n",
1808 offset_read);
1809 ret = EINVAL;
1810 goto exit;
1811 }
1812
1813 if (bytes_read > length) {
1814 device_printf(sc->sc_dev,
1815 "NVM ACCESS response with too much data "
250a1c33 1816 "(%d bytes requested, %d bytes received)\n",
edfc8a07 1817 length, bytes_read);
1818 ret = EINVAL;
1819 goto exit;
1820 }
1821
39f8331b 1822 /* Copy the chunk just read into the output buffer. */
1823 memcpy(data + offset, resp_data, bytes_read);
1824 *len = bytes_read;
1825
1826 exit:
1827 iwm_free_resp(sc, &cmd);
1828 return ret;
1829}
1830
1831/*
1832 * Reads an NVM section completely.
edfc8a07 1833 * NICs prior to the 7000 family don't have a real NVM; they just read
1834 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
1835 * by the uCode, we must check manually in this case that we don't
1836 * overflow and try to read more than the EEPROM size.
1837 * For 7000 family NICs, we supply the maximal size we can read, and
1838 * the uCode fills the response with as much data as it can,
1839 * without overflowing, so no check is needed.
1840 */
1841static int
1842iwm_nvm_read_section(struct iwm_softc *sc,
39f8331b 1843 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
24a8d46a 1844{
1845 uint16_t seglen, length, offset = 0;
1846 int ret;
edfc8a07 1847
1848 /* Set nvm section read length */
1849 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
24a8d46a 1850
39f8331b 1851 seglen = length;
24a8d46a 1852
1853 /* Read the NVM until exhausted (reading less than requested) */
1854 while (seglen == length) {
1855 /* Check no memory assumptions fail and cause an overflow */
1856 if ((size_read + offset + length) >
1857 sc->eeprom_size) {
1858 device_printf(sc->sc_dev,
1859 "EEPROM size is too small for NVM\n");
1860 return ENOBUFS;
24a8d46a 1861 }
1862
1863 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1864 if (ret) {
1865 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1866 "Cannot read NVM from section %d offset %d, length %d\n",
1867 section, offset, length);
1868 return ret;
1869 }
1870 offset += seglen;
1871 }
1872
1873 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1874 "NVM section %d read completed\n", section);
1875 *len = offset;
1876 return 0;
1877}
1878
24a8d46a 1879/* NVM offsets (in words) definitions */
edfc8a07 1880enum iwm_nvm_offsets {
1881 /* NVM HW-Section offset (in words) definitions */
1882 IWM_HW_ADDR = 0x15,
1883
1884/* NVM SW-Section offset (in words) definitions */
1885 IWM_NVM_SW_SECTION = 0x1C0,
1886 IWM_NVM_VERSION = 0,
1887 IWM_RADIO_CFG = 1,
1888 IWM_SKU = 2,
1889 IWM_N_HW_ADDRS = 3,
1890 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1891
1892/* NVM calibration section offset (in words) definitions */
1893 IWM_NVM_CALIB_SECTION = 0x2B8,
1894 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1895};
1896
1897enum iwm_8000_nvm_offsets {
1898 /* NVM HW-Section offset (in words) definitions */
1899 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1900 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1901 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1902 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1903 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1904
1905 /* NVM SW-Section offset (in words) definitions */
1906 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1907 IWM_NVM_VERSION_8000 = 0,
1908 IWM_RADIO_CFG_8000 = 0,
1909 IWM_SKU_8000 = 2,
1910 IWM_N_HW_ADDRS_8000 = 3,
1911
1912 /* NVM REGULATORY -Section offset (in words) definitions */
1913 IWM_NVM_CHANNELS_8000 = 0,
1914 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1915 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1916 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1917
1918 /* NVM calibration section offset (in words) definitions */
1919 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1920 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1921};
1922
1923/* SKU Capabilities (actual values from NVM definition) */
1924enum nvm_sku_bits {
1925 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1926 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1927 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1928 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1929};
1930
1931/* radio config bits (actual values from NVM definition) */
1932#define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1933#define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1934#define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1935#define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1936#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1937#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1938
1939#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1940#define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1941#define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1942#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1943#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1944#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
1945
1946#define DEFAULT_MAX_TX_POWER 16
1947
1948/**
1949 * enum iwm_nvm_channel_flags - channel flags in NVM
1950 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1951 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1952 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1953 * @IWM_NVM_CHANNEL_RADAR: radar detection required
39f8331b 1954 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1955 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1956 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1957 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1958 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1959 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1960 */
1961enum iwm_nvm_channel_flags {
1962 IWM_NVM_CHANNEL_VALID = (1 << 0),
1963 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1964 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1965 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1966 IWM_NVM_CHANNEL_DFS = (1 << 7),
1967 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1968 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1969 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1970 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1971};
1972
1973/* lower blocks contain EEPROM image and calibration data */
1974#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(uint16_t)) /* 16 KB */
1975#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(uint16_t)) /* 32 KB */
1976
24a8d46a 1977/*
77de6c2d 1978 * Translate EEPROM flags to net80211.
24a8d46a 1979 */
1980static uint32_t
1981iwm_eeprom_channel_flags(uint16_t ch_flags)
24a8d46a 1982{
1983 uint32_t nflags;
1984
1985 nflags = 0;
1986 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1987 nflags |= IEEE80211_CHAN_PASSIVE;
1988 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1989 nflags |= IEEE80211_CHAN_NOADHOC;
1990 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1991 nflags |= IEEE80211_CHAN_DFS;
1992 /* Just in case. */
1993 nflags |= IEEE80211_CHAN_NOADHOC;
24a8d46a 1994 }
24a8d46a 1995
77de6c2d 1996 return (nflags);
1997}
1998
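/*
 * Add the NVM channels in [ch_idx, ch_num) to the net80211 channel
 * array, skipping entries without IWM_NVM_CHANNEL_VALID and
 * translating the NVM flags via iwm_eeprom_channel_flags().
 */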
1999static void
77de6c2d 2000iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2001 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2002 const uint8_t bands[])
24a8d46a 2003{
39f8331b 2004 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
77de6c2d 2005 uint32_t nflags;
24a8d46a 2006 uint16_t ch_flags;
2007 uint8_t ieee;
2008 int error;
24a8d46a 2009
77de6c2d 2010 for (; ch_idx < ch_num; ch_idx++) {
24a8d46a 2011 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2012 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2013 ieee = iwm_nvm_channels[ch_idx];
2014 else
2015 ieee = iwm_nvm_channels_8000[ch_idx];
2016
2017 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2018 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2019 "Ch. %d Flags %x [%sGHz] - No traffic\n",
77de6c2d 2020 ieee, ch_flags,
2021 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2022 "5.2" : "2.4");
2023 continue;
2024 }
2025
2026 nflags = iwm_eeprom_channel_flags(ch_flags);
2027 error = ieee80211_add_channel(chans, maxchans, nchans,
2028 ieee, 0, 0, nflags, bands);
2029 if (error != 0)
2030 break;
2031
2032 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2033 "Ch. %d Flags %x [%sGHz] - Added\n",
77de6c2d 2034 ieee, ch_flags,
2035 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2036 "5.2" : "2.4");
2037 }
2038}
2039
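/*
 * Build the channel map: channels 1-13 as 11b/g, channel 14 as 11b
 * only, and the 5 GHz channels as 11a when the SKU enables the
 * 5.2 GHz band.
 */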
2040static void
2041iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2042 struct ieee80211_channel chans[])
2043{
2044 struct iwm_softc *sc = ic->ic_softc;
39f8331b 2045 struct iwm_nvm_data *data = sc->nvm_data;
77de6c2d 2046 uint8_t bands[howmany(IEEE80211_MODE_MAX, 8)];
e8951a47 2047 size_t ch_num;
2048
2049 memset(bands, 0, sizeof(bands));
2050 /* 1-13: 11b/g channels. */
2051 setbit(bands, IEEE80211_MODE_11B);
2052 setbit(bands, IEEE80211_MODE_11G);
2053 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2054 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2055
2056 /* 14: 11b channel only. */
2057 clrbit(bands, IEEE80211_MODE_11G);
2058 iwm_add_channel_band(sc, chans, maxchans, nchans,
2059 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2060
2061 if (data->sku_cap_band_52GHz_enable) {
2062 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2063 ch_num = nitems(iwm_nvm_channels);
2064 else
2065 ch_num = nitems(iwm_nvm_channels_8000);
2066 memset(bands, 0, sizeof(bands));
2067 setbit(bands, IEEE80211_MODE_11A);
2068 iwm_add_channel_band(sc, chans, maxchans, nchans,
e8951a47 2069 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
77de6c2d 2070 }
2071}
2072
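/*
 * Family 8000 MAC address selection: prefer the MAC_OVERRIDE section
 * unless it holds the reserved or an invalid address, otherwise fall
 * back to the address stored in the WFMP registers.
 */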
e8951a47 2073static void
39f8331b 2074iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2075 const uint16_t *mac_override, const uint16_t *nvm_hw)
2076{
2077 const uint8_t *hw_addr;
2078
2079 if (mac_override) {
2080 static const uint8_t reserved_mac[] = {
2081 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2082 };
2083
2084 hw_addr = (const uint8_t *)(mac_override +
2085 IWM_MAC_ADDRESS_OVERRIDE_8000);
2086
2087 /*
2088 * Store the MAC address from the MAO section.
2089 * No byte swapping is required in the MAO section.
2090 */
2091 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2092
2093 /*
2094 * Force the use of the OTP MAC address in case of reserved MAC
2095 * address in the NVM, or if address is given but invalid.
2096 */
2097 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2098 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2099 iwm_is_valid_ether_addr(data->hw_addr) &&
2100 !IEEE80211_IS_MULTICAST(data->hw_addr))
2101 return;
2102
2103 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2104 "%s: mac address from nvm override section invalid\n",
2105 __func__);
2106 }
2107
2108 if (nvm_hw) {
2109 /* read the mac address from WFMP registers */
2110 uint32_t mac_addr0 =
2111 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2112 uint32_t mac_addr1 =
2113 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2114
2115 hw_addr = (const uint8_t *)&mac_addr0;
2116 data->hw_addr[0] = hw_addr[3];
2117 data->hw_addr[1] = hw_addr[2];
2118 data->hw_addr[2] = hw_addr[1];
2119 data->hw_addr[3] = hw_addr[0];
2120
2121 hw_addr = (const uint8_t *)&mac_addr1;
2122 data->hw_addr[4] = hw_addr[1];
2123 data->hw_addr[5] = hw_addr[0];
2124
2125 return;
2126 }
2127
2128 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2129 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2130}
2131
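/*
 * NVM field accessors. Family 7000 stores these values as 16-bit
 * words in the SW section; family 8000 uses 32-bit words, with the
 * radio config and SKU coming from the PHY_SKU section instead.
 */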
2132static int
2133iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2134 const uint16_t *phy_sku)
2135{
2136 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2137 return le16_to_cpup(nvm_sw + IWM_SKU);
2138
2139 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2140}
2141
2142static int
2143iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2144{
2145 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2146 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2147 else
2148 return le32_to_cpup((const uint32_t *)(nvm_sw +
2149 IWM_NVM_VERSION_8000));
2150}
2151
2152static int
2153iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2154 const uint16_t *phy_sku)
2155{
2156 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2157 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2158
2159 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2160}
2161
2162static int
b9794186 2163iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2164{
2165 int n_hw_addr;
2166
2167 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2168 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2169
2170 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2171
2172 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2173}
2174
2175static void
2176iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2177 uint32_t radio_cfg)
2178{
2179 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2180 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2181 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2182 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2183 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2184 return;
2185 }
2186
2187 /* set the radio configuration for family 8000 */
2188 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2189 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2190 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2191 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2192 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2193 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2194}
2195
24a8d46a 2196static int
2197iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2198 const uint16_t *nvm_hw, const uint16_t *mac_override)
2199{
2200#ifdef notyet /* for FAMILY 9000 */
2201 if (cfg->mac_addr_from_csr) {
2202 iwm_set_hw_address_from_csr(sc, data);
2203 } else
2204#endif
2205 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2206 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2207
2208 /* The byte order is little endian 16-bit, i.e. the bytes come as 2-1-4-3-6-5. */
2209 data->hw_addr[0] = hw_addr[1];
2210 data->hw_addr[1] = hw_addr[0];
2211 data->hw_addr[2] = hw_addr[3];
2212 data->hw_addr[3] = hw_addr[2];
2213 data->hw_addr[4] = hw_addr[5];
2214 data->hw_addr[5] = hw_addr[4];
2215 } else {
2216 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2217 }
2218
2219 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2220 device_printf(sc->sc_dev, "no valid mac address was found\n");
2221 return EINVAL;
2222 }
2223
2224 return 0;
2225}
2226
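/*
 * Allocate and fill an iwm_nvm_data structure from the raw NVM
 * sections: decode the version, radio configuration, SKU and MAC
 * address, then copy the channel flags for the family's channel count.
 */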
2227static struct iwm_nvm_data *
24a8d46a 2228iwm_parse_nvm_data(struct iwm_softc *sc,
2229 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2230 const uint16_t *nvm_calib, const uint16_t *mac_override,
2231 const uint16_t *phy_sku, const uint16_t *regulatory)
24a8d46a 2232{
39f8331b 2233 struct iwm_nvm_data *data;
e8951a47 2234 uint32_t sku, radio_cfg;
24a8d46a 2235
2236 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2237 data = kmalloc(sizeof(*data) +
2238 IWM_NUM_CHANNELS * sizeof(uint16_t),
2239 M_DEVBUF, M_WAITOK | M_ZERO);
2240 } else {
2241 data = kmalloc(sizeof(*data) +
2242 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2243 M_DEVBUF, M_WAITOK | M_ZERO);
2244 }
2245 if (!data)
2246 return NULL;
2247
e8951a47 2248 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
24a8d46a 2249
2250 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2251 iwm_set_radio_cfg(sc, data, radio_cfg);
24a8d46a 2252
e8951a47 2253 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
24a8d46a
MD
2254 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2255 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2256 data->sku_cap_11n_enable = 0;
2257
e8951a47 2258 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
24a8d46a 2259
2260 /* If no valid mac address was found - bail out */
2261 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2262 kfree(data, M_DEVBUF);
2263 return NULL;
2264 }
2265
2266 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2267 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2268 IWM_NUM_CHANNELS * sizeof(uint16_t));
2269 } else {
2270 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2271 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2272 }
24a8d46a 2273
39f8331b 2274 return data;
2275}
2276
2277static void
2278iwm_free_nvm_data(struct iwm_nvm_data *data)
2279{
2280 if (data != NULL)
2281 kfree(data, M_DEVBUF);
2282}
2283
2284static struct iwm_nvm_data *
2285iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2286{
e8951a47 2287 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2288
2289 /* Checking for required sections */
2290 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2291 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
39f8331b 2292 !sections[sc->nvm_hw_section_num].data) {
2293 device_printf(sc->sc_dev,
2294 "Can't parse empty OTP/NVM sections\n");
39f8331b 2295 return NULL;
e8951a47 2296 }
2297 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2298 /* SW and REGULATORY sections are mandatory */
2299 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2300 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2301 device_printf(sc->sc_dev,
2302 "Can't parse empty OTP/NVM sections\n");
39f8331b 2303 return NULL;
2304 }
2305 /* MAC_OVERRIDE or at least HW section must exist */
39f8331b 2306 if (!sections[sc->nvm_hw_section_num].data &&
2307 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2308 device_printf(sc->sc_dev,
2309 "Can't parse mac_address, empty sections\n");
39f8331b 2310 return NULL;
2311 }
2312
2313 /* PHY_SKU section is mandatory in B0 */
2314 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2315 device_printf(sc->sc_dev,
2316 "Can't parse phy_sku in B0, empty sections\n");
39f8331b 2317 return NULL;
e8951a47 2318 }
2319 } else {
2320 panic("unknown device family %d\n", sc->sc_device_family);
2321 }
2322
39f8331b 2323 hw = (const uint16_t *) sections[sc->nvm_hw_section_num].data;
24a8d46a 2324 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2325 calib = (const uint16_t *)
2326 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2327 regulatory = (const uint16_t *)
2328 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2329 mac_override = (const uint16_t *)
2330 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2331 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2332
2333 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2334 phy_sku, regulatory);
2335}
2336
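/*
 * Read each NVM section into a scratch buffer, keep a private copy of
 * every section that was found, then parse the copies into
 * sc->nvm_data.
 */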
2337static int
2338iwm_nvm_init(struct iwm_softc *sc)
2339{
2340 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2341 int i, ret, section;
2342 uint32_t size_read = 0;
2343 uint8_t *nvm_buffer, *temp;
24a8d46a 2344 uint16_t len;
24a8d46a 2345
39f8331b 2346 memset(nvm_sections, 0, sizeof(nvm_sections));
edfc8a07 2347
2348 if (sc->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2349 return EINVAL;
24a8d46a 2350
2351 /* load NVM values from nic */
2352 /* Read From FW NVM */
2353 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
24a8d46a 2354
2355 nvm_buffer = kmalloc(sc->eeprom_size, M_DEVBUF, M_INTWAIT | M_ZERO);
2356 if (!nvm_buffer)
2357 return ENOMEM;
2358 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2359 /* we override the constness for initial read */
2360 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2361 &len, size_read);
2362 if (ret)
edfc8a07 2363 continue;
2364 size_read += len;
2365 temp = kmalloc(len, M_DEVBUF, M_INTWAIT);
2366 if (!temp) {
2367 ret = ENOMEM;
2368 break;
2369 }
2370 memcpy(temp, nvm_buffer, len);
2371
2372 nvm_sections[section].data = temp;
2373 nvm_sections[section].length = len;
2374 }
2375 if (!size_read)
2376 device_printf(sc->sc_dev, "OTP is blank\n");
2377 kfree(nvm_buffer, M_DEVBUF);
24a8d46a 2378
2379 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2380 if (!sc->nvm_data)
2381 return EINVAL;
2382 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2383 "nvm version = %x\n", sc->nvm_data->nvm_version);
2384
2385 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2386 if (nvm_sections[i].data != NULL)
2387 kfree(nvm_sections[i].data, M_DEVBUF);
2388 }
2389
39f8331b 2390 return 0;
2391}
2392
2393/*
2394 * Firmware loading gunk. This is kind of a weird hybrid between the
2395 * iwn driver and the Linux iwlwifi driver.
2396 */
2397
2398static int
edfc8a07 2399iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
24a8d46a 2400 const uint8_t *section, uint32_t byte_cnt)
edfc8a07
IV
2401{
2402 int error = EINVAL;
2403 uint32_t chunk_sz, offset;
2404
2405 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2406
2407 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2408 uint32_t addr, len;
2409 const uint8_t *data;
2410
2411 addr = dst_addr + offset;
2412 len = MIN(chunk_sz, byte_cnt - offset);
2413 data = section + offset;
2414
2415 error = iwm_firmware_load_chunk(sc, addr, data, len);
2416 if (error)
2417 break;
2418 }
2419
2420 return error;
2421}
2422
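/*
 * DMA one firmware chunk to device memory: copy it into the
 * preallocated fw_dma buffer, program the FH service channel, and
 * wait up to a second for the "chunk done" interrupt.
 */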
2423static int
2424iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2425 const uint8_t *chunk, uint32_t byte_cnt)
2426{
2427 struct iwm_dma_info *dma = &sc->fw_dma;
2428 int error;
2429
2430 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2431 memcpy(dma->vaddr, chunk, byte_cnt);
2432 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2433
2434 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2435 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2436 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2437 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2438 }
2439
2440 sc->sc_fw_chunk_done = 0;
2441
2442 if (!iwm_nic_lock(sc))
2443 return EBUSY;
2444
2445 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2446 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2447 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2448 dst_addr);
2449 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2450 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2451 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2452 (iwm_get_dma_hi_addr(dma->paddr)
2453 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2454 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2455 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2456 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2457 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2458 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2459 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2460 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2461 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2462
2463 iwm_nic_unlock(sc);
2464
2465 /* wait 1s for this segment to load */
2466 error = 0;
2467 while (!sc->sc_fw_chunk_done) {
b0b29253 2468#if defined(__DragonFly__)
303bb3ad 2469 error = lksleep(&sc->sc_fw, &sc->sc_lk, 0, "iwmfw", hz);
2470#else
2471 error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2472#endif
45bc40b1 2473 if (error)
24a8d46a 2474 break;
45bc40b1 2475 }
24a8d46a 2476
2477 if (!sc->sc_fw_chunk_done) {
2478 device_printf(sc->sc_dev,
2479 "fw chunk addr 0x%x len %d failed to load\n",
2480 dst_addr, byte_cnt);
2481 }
2482
2483 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2484 dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2485 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2486 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2487 iwm_nic_unlock(sc);
2488 }
2489
2490 return error;
2491}
2492
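/*
 * Load consecutive firmware sections for one CPU of a family 8000
 * device, stopping at a CPU1/CPU2 or paging separator section, and
 * report each loaded section to the uCode via IWM_FH_UCODE_LOAD_STATUS.
 */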
2493int
2494iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2495 int cpu, int *first_ucode_section)
2496{
2497 int shift_param;
2498 int i, error = 0, sec_num = 0x1;
2499 uint32_t val, last_read_idx = 0;
2500 const void *data;
2501 uint32_t dlen;
2502 uint32_t offset;
2503
2504 if (cpu == 1) {
2505 shift_param = 0;
2506 *first_ucode_section = 0;
2507 } else {
2508 shift_param = 16;
2509 (*first_ucode_section)++;
2510 }
2511
2512 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2513 last_read_idx = i;
2514 data = fws->fw_sect[i].fws_data;
2515 dlen = fws->fw_sect[i].fws_len;
2516 offset = fws->fw_sect[i].fws_devoff;
2517
2518 /*
2519 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2520 * CPU1 to CPU2.
2521 * PAGING_SEPARATOR_SECTION delimiter - separate between
2522 * CPU2 non paged to CPU2 paging sec.
2523 */
2524 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2525 offset == IWM_PAGING_SEPARATOR_SECTION)
2526 break;
2527
2528 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2529 "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2530 i, offset, dlen, cpu);
2531
2532 if (dlen > sc->sc_fwdmasegsz) {
2533 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2534 "chunk %d too large (%d bytes)\n", i, dlen);
2535 error = EFBIG;
2536 } else {
2537 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2538 }
2539 if (error) {
2540 device_printf(sc->sc_dev,
2541 "could not load firmware chunk %d (error %d)\n",
2542 i, error);
2543 return error;
2544 }
2545
2546 /* Notify the ucode of the loaded section number and status */
2547 if (iwm_nic_lock(sc)) {
2548 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2549 val = val | (sec_num << shift_param);
2550 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2551 sec_num = (sec_num << 1) | 0x1;
2552 iwm_nic_unlock(sc);
2553
2554 /*
2555 * The firmware won't load correctly without this delay.
2556 */
2557 DELAY(8000);
2558 }
2559 }
2560
2561 *first_ucode_section = last_read_idx;
2562
2563 if (iwm_nic_lock(sc)) {
2564 if (cpu == 1)
2565 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2566 else
2567 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2568 iwm_nic_unlock(sc);
2569 }
2570
2571 return 0;
2572}
2573
2574int
2575iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2576{
2577 struct iwm_fw_sects *fws;
2578 int error = 0;
2579 int first_ucode_section;
2580
2581 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2582 ucode_type);
2583
2584 fws = &sc->sc_fw.fw_sects[ucode_type];
2585
2586 /* configure the ucode to be ready to get the secured image */
2587 /* release CPU reset */
2588 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2589
2590 /* load to FW the binary Secured sections of CPU1 */
2591 error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2592 if (error)
2593 return error;
2594
2595 /* load to FW the binary sections of CPU2 */
2596 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2597}
2598
24a8d46a 2599static int
edfc8a07 2600iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2601{
2602 struct iwm_fw_sects *fws;
edfc8a07 2603 int error, i;
2604 const void *data;
2605 uint32_t dlen;
2606 uint32_t offset;
2607
2608 sc->sc_uc.uc_intr = 0;
2609
2610 fws = &sc->sc_fw.fw_sects[ucode_type];
2611 for (i = 0; i < fws->fw_count; i++) {
2612 data = fws->fw_sect[i].fws_data;
2613 dlen = fws->fw_sect[i].fws_len;
2614 offset = fws->fw_sect[i].fws_devoff;
2615 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2616 "LOAD FIRMWARE type %d offset %u len %d\n",
2617 ucode_type, offset, dlen);
2618 if (dlen > sc->sc_fwdmasegsz) {
2619 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2620 "chunk %d too large (%d bytes)\n", i, dlen);
2621 error = EFBIG;
2622 } else {
2623 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2624 }
2625 if (error) {
2626 device_printf(sc->sc_dev,
2627 "could not load firmware chunk %u of %u "
2628 "(error=%d)\n", i, fws->fw_count, error);
2629 return error;
2630 }
2631 }
2632
2633 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2634
2635 return 0;
2636}
2637
2638static int
2639iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2640{
2641 int error, w;
2642
2643 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2644 error = iwm_load_firmware_8000(sc, ucode_type);
2645 else
2646 error = iwm_load_firmware_7000(sc, ucode_type);
2647 if (error)
2648 return error;
2649
2650 /* wait for the firmware to load */
24a8d46a 2651 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
b0b29253 2652#if defined(__DragonFly__)
303bb3ad 2653 error = lksleep(&sc->sc_uc, &sc->sc_lk, 0, "iwmuc", hz/10);
2654#else
2655 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2656#endif
24a8d46a 2657 }
2658 if (error || !sc->sc_uc.uc_ok) {
2659 device_printf(sc->sc_dev, "could not load firmware\n");
2660 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2661 device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2662 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2663 device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2664 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2665 }
2666 }
2667
2668 /*
2669 * Give the firmware some time to initialize.
2670 * Accessing it too early causes errors.
2671 */
303bb3ad 2672 lksleep(&w, &sc->sc_lk, 0, "iwmfwinit", hz);
2673
2674 return error;
2675}
2676
2677static int
2678iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2679{
2680 int error;
2681
2682 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2683
2684 if ((error = iwm_nic_init(sc)) != 0) {
2685 device_printf(sc->sc_dev, "unable to init nic\n");
2686 return error;
2687 }
2688
2689 /* make sure rfkill handshake bits are cleared */
2690 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2691 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2692 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2693
2694 /* clear (again), then enable host interrupts */
2695 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2696 iwm_enable_interrupts(sc);
2697
2698 /* really make sure rfkill handshake bits are cleared */
2699 /* maybe we should write a few times more? just to make sure */
2700 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2701 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702
2703 /* Load the given image to the HW */
2704 return iwm_load_firmware(sc, ucode_type);
2705}
2706
2707static int
2708iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2709{
2710 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2711 .valid = htole32(valid_tx_ant),
2712 };
2713
2714 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2715 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2716}
2717
2718static int
2719iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2720{
2721 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2722 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2723
2724 /* Set parameters */
2725 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2726 phy_cfg_cmd.calib_control.event_trigger =
2727 sc->sc_default_calib[ucode_type].event_trigger;
2728 phy_cfg_cmd.calib_control.flow_trigger =
2729 sc->sc_default_calib[ucode_type].flow_trigger;
2730
2731 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2732 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2733 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2734 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2735}
2736
2737static int
2738iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2739 enum iwm_ucode_type ucode_type)
2740{
2741 enum iwm_ucode_type old_type = sc->sc_uc_current;
2742 int error;
2743
45bc40b1 2744 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
edfc8a07 2745 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
45bc40b1 2746 error);
24a8d46a 2747 return error;
45bc40b1 2748 }
2749
2750 sc->sc_uc_current = ucode_type;
2751 error = iwm_start_fw(sc, ucode_type);
2752 if (error) {
edfc8a07 2753 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2754 sc->sc_uc_current = old_type;
2755 return error;
2756 }
2757
29fcb331 2758 error = iwm_post_alive(sc);
45bc40b1 2759 if (error) {
edfc8a07 2760 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2761 }
2762 return error;
2763}
2764
2765/*
2766 * mvm misc bits
2767 */
2768
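/*
 * Load and boot the INIT ucode. With justnvm set, only read the NVM;
 * otherwise run through BT coex, Smart FIFO, TX antenna and phy
 * configuration and wait for the init complete notification.
 */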
2769static int
2770iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2771{
2772 int error;
2773
2774 /* do not operate with rfkill switch turned on */
2775 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2776 device_printf(sc->sc_dev,
2777 "radio is disabled by hardware switch\n");
2778 return EPERM;
2779 }
2780
2781 sc->sc_init_complete = 0;
2782 if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2783 IWM_UCODE_TYPE_INIT)) != 0) {
2784 device_printf(sc->sc_dev, "failed to load init firmware\n");
24a8d46a 2785 return error;
29fcb331 2786 }
2787
2788 if (justnvm) {
2789 if ((error = iwm_nvm_init(sc)) != 0) {
2790 device_printf(sc->sc_dev, "failed to read nvm\n");
2791 return error;
2792 }
39f8331b 2793 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
24a8d46a 2794
2795 return 0;
2796 }
2797
2798 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2799 device_printf(sc->sc_dev,
2800 "failed to send bt coex configuration: %d\n", error);
2801 return error;
2802 }
2803
2804 /* Init Smart FIFO. */
2805 error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2806 if (error != 0)
2807 return error;
2808
24a8d46a 2809 /* Send TX valid antennas before triggering calibrations */
2810 if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2811 device_printf(sc->sc_dev,
2812 "failed to send antennas before calibration: %d\n", error);
24a8d46a 2813 return error;
45bc40b1 2814 }
2815
2816 /*
2817 * Send the phy configuration command to the init uCode
2818 * to start the 16.0 uCode init image's internal calibrations.
2819 */
2820 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2821 device_printf(sc->sc_dev,
2822 "%s: failed to run internal calibration: %d\n",
2823 __func__, error);
2824 return error;
2825 }
2826
2827 /*
2828 * Nothing to do but wait for the init complete notification
2829 * from the firmware
2830 */
45bc40b1 2831 while (!sc->sc_init_complete) {
b0b29253 2832#if defined(__DragonFly__)
303bb3ad 2833 error = lksleep(&sc->sc_init_complete, &sc->sc_lk,
2834 0, "iwminit", 2*hz);
2835#else
2836 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2837 0, "iwminit", 2*hz);
2838#endif
2839 if (error) {
edfc8a07 2840 device_printf(sc->sc_dev, "init complete failed: %d\n",
45bc40b1 2841 sc->sc_init_complete);
24a8d46a 2842 break;
2843 }
2844 }
24a8d46a 2845
2846 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2847 sc->sc_init_complete ? "" : "not ");
2848
2849 return error;
2850}
2851
2852/*
2853 * receive side
2854 */
2855
2856/* (re)stock rx ring, called at init-time and at runtime */
2857static int
2858iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2859{
2860 struct iwm_rx_ring *ring = &sc->rxq;
2861 struct iwm_rx_data *data = &ring->data[idx];
2862 struct mbuf *m;
cc440b26 2863 bus_dmamap_t dmamap = NULL;
2864 bus_dma_segment_t seg;
2865 int nsegs, error;
2866
2867 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2868 if (m == NULL)
2869 return ENOBUFS;
2870
24a8d46a 2871 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2872#if defined(__DragonFly__)
2873 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, ring->spare_map,
2874 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2875#else
2876 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2877 &seg, &nsegs, BUS_DMA_NOWAIT);
2878#endif
2879 if (error != 0) {
24a8d46a 2880 device_printf(sc->sc_dev,
cc440b26 2881 "%s: can't map mbuf, error %d\n", __func__, error);
2882 goto fail;
2883 }
2884
2885 if (data->m != NULL)
2886 bus_dmamap_unload(ring->data_dmat, data->map);
2887
2888 /* Swap ring->spare_map with data->map */
2889 dmamap = data->map;
2890 data->map = ring->spare_map;
2891 ring->spare_map = dmamap;
2892
24a8d46a 2893 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2894 data->m = m;
2895
24a8d46a 2896 /* Update RX descriptor. */
2897 KKASSERT((seg.ds_addr & 255) == 0);
2898 ring->desc[idx] = htole32(seg.ds_addr >> 8);
2899 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2900 BUS_DMASYNC_PREWRITE);
2901
2902 return 0;
2903fail:
3d970f14 2904 m_freem(m);
2905 return error;
2906}
2907
2908#define IWM_RSSI_OFFSET 50
2909static int
2910iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2911{
2912 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2913 uint32_t agc_a, agc_b;
2914 uint32_t val;
2915
2916 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2917 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2918 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2919
2920 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2921 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2922 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2923
2924 /*
2925 * dBm = rssi dB - agc dB - constant.
2926 * Higher AGC (higher radio gain) means lower signal.
2927 */
2928 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2929 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2930 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2931
2932 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2933 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2934 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2935
2936 return max_rssi_dbm;
2937}
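/*
 * Worked example with hypothetical values: rssi_a = 60 dB and
 * agc_a = 40 dB give rssi_a_dbm = 60 - 50 - 40 = -30 dBm for chain A.
 */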
2938
2939/*
2940 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2941 * values are reported by the fw as positive values - need to negate
2942 * to obtain their dBm. Account for missing antennas by replacing 0
2943 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2944 */
2945static int
2946iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2947{
2948 int energy_a, energy_b, energy_c, max_energy;
2949 uint32_t val;
2950
2951 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2952 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2953 IWM_RX_INFO_ENERGY_ANT_A_POS;
2954 energy_a = energy_a ? -energy_a : -256;
2955 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2956 IWM_RX_INFO_ENERGY_ANT_B_POS;
2957 energy_b = energy_b ? -energy_b : -256;
2958 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2959 IWM_RX_INFO_ENERGY_ANT_C_POS;
2960 energy_c = energy_c ? -energy_c : -256;
2961 max_energy = MAX(energy_a, energy_b);
2962 max_energy = MAX(max_energy, energy_c);
2963
2964 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2965 "energy In A %d B %d C %d , and max %d\n",
2966 energy_a, energy_b, energy_c, max_energy);
2967
2968 return max_energy;
2969}
2970
2971static void
2972iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2973 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2974{
2975 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2976
2977 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2978 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2979
2980 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2981}
2982
2983/*
2984 * Retrieve the average noise (in dBm) among receivers.
2985 */
2986static int
2987iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2988{
2989 int i, total, nbant, noise;
2990
2991 total = nbant = noise = 0;
2992 for (i = 0; i < 3; i++) {
2993 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2994 if (noise) {
2995 total += noise;
2996 nbant++;
2997 }
2998 }
2999
3000 /* There should be at least one antenna but check anyway. */
3001 return (nbant == 0) ? -127 : (total / nbant) - 107;
3002}
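/*
 * Worked example with hypothetical silence RSSI values 30, 28 and 0:
 * two antennas report, so the result is (30 + 28) / 2 - 107 = -78 dBm.
 */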
3003
3004/*
3005 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3006 *
3007 * Handles the actual data of the Rx packet from the fw
3008 */
3009static void
3010iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3011 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3012{
77de6c2d 3013 struct ieee80211com *ic = &sc->sc_ic;
3014 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3015 struct ieee80211_frame *wh;
3016 struct ieee80211_node *ni;
3017 struct ieee80211_rx_stats rxs;
3018 struct mbuf *m;
3019 struct iwm_rx_phy_info *phy_info;
3020 struct iwm_rx_mpdu_res_start *rx_res;
3021 uint32_t len;
3022 uint32_t rx_pkt_status;
3023 int rssi;
3024
3025 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3026
3027 phy_info = &sc->sc_last_phy_info;
3028 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3029 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3030 len = le16toh(rx_res->byte_count);
3031 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3032
3033 m = data->m;
3034 m->m_data = pkt->data + sizeof(*rx_res);
3035 m->m_pkthdr.len = m->m_len = len;
3036
3037 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3038 device_printf(sc->sc_dev,
3039 "dsp size out of range [0,20]: %d\n",
3040 phy_info->cfg_phy_cnt);
3041 return;
3042 }
3043
3044 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3045 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3046 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3047 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3048 return; /* drop */
3049 }
3050
3051 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3052 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3053 } else {
3054 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3055 }
3056 rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
3057 rssi = MIN(rssi, sc->sc_max_rssi); /* clip to max. 100% */
3058
3059 /* replenish ring for the buffer we're going to feed to the sharks */
3060 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3061 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3062 __func__);
3063 return;
3064 }
3065
3066 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3067
3068 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3069 "%s: phy_info: channel=%d, flags=0x%08x\n",
3070 __func__,
3071 le16toh(phy_info->channel),
3072 le16toh(phy_info->phy_flags));
3073
3074 /*
3075 * Populate an RX state struct with the provided information.
3076 */
3077 bzero(&rxs, sizeof(rxs));
3078 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3079 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3080 rxs.c_ieee = le16toh(phy_info->channel);
3081 if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3082 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3083 } else {
3084 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3085 }
3086 rxs.rssi = rssi - sc->sc_noise;
3087 rxs.nf = sc->sc_noise;
3088
3089 if (ieee80211_radiotap_active_vap(vap)) {
3090 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3091
3092 tap->wr_flags = 0;
3093 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3094 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3095 tap->wr_chan_freq = htole16(rxs.c_freq);
3096 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3097 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3098 tap->wr_dbm_antsignal = (int8_t)rssi;
3099 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3100 tap->wr_tsft = phy_info->system_timestamp;
3101 switch (phy_info->rate) {
3102 /* CCK rates. */
3103 case 10: tap->wr_rate = 2; break;
3104 case 20: tap->wr_rate = 4; break;
3105 case 55: tap->wr_rate = 11; break;
3106 case 110: tap->wr_rate = 22; break;
3107 /* OFDM rates. */
3108 case 0xd: tap->wr_rate = 12; break;
3109 case 0xf: tap->wr_rate = 18; break;
3110 case 0x5: tap->wr_rate = 24; break;
3111 case 0x7: tap->wr_rate = 36; break;
3112 case 0x9: tap->wr_rate = 48; break;
3113 case 0xb: tap->wr_rate = 72; break;
3114 case 0x1: tap->wr_rate = 96; break;
3115 case 0x3: tap->wr_rate = 108; break;
3116 /* Unknown rate: should not happen. */
3117 default: tap->wr_rate = 0;
3118 }
3119 }
3120
3121 IWM_UNLOCK(sc);
3122 if (ni != NULL) {
3123 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3124 ieee80211_input_mimo(ni, m, &rxs);
3125 ieee80211_free_node(ni);
3126 } else {
3127 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3128 ieee80211_input_mimo_all(ic, m, &rxs);
3129 }
3130 IWM_LOCK(sc);
3131}
3132
77de6c2d 3133static int
3134iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3135 struct iwm_node *in)
3136{
3137 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3138 struct ieee80211_node *ni = &in->in_ni;
3139 struct ieee80211vap *vap = ni->ni_vap;
3140 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3141 int failack = tx_resp->failure_frame;
3142
3143 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3144
3145 /* Update rate control statistics. */
3146 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3147 __func__,
3148 (int) le16toh(tx_resp->status.status),
3149 (int) le16toh(tx_resp->status.sequence),
3150 tx_resp->frame_count,
3151 tx_resp->bt_kill_count,
3152 tx_resp->failure_rts,
3153 tx_resp->failure_frame,
3154 le32toh(tx_resp->initial_rate),
3155 (int) le16toh(tx_resp->wireless_media_time));
3156
3157 if (status != IWM_TX_STATUS_SUCCESS &&
3158 status != IWM_TX_STATUS_DIRECT_DONE) {
77de6c2d 3159 ieee80211_ratectl_tx_complete(vap, ni,
24a8d46a 3160 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
77de6c2d 3161 return (1);
24a8d46a 3162 } else {
77de6c2d 3163 ieee80211_ratectl_tx_complete(vap, ni,
24a8d46a 3164 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
77de6c2d 3165 return (0);
3166 }
3167}
3168
3169static void
3170iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3171 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3172{
3173 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3174 int idx = cmd_hdr->idx;
3175 int qid = cmd_hdr->qid;
3176 struct iwm_tx_ring *ring = &sc->txq[qid];
3177 struct iwm_tx_data *txd = &ring->data[idx];
3178 struct iwm_node *in = txd->in;
3179 struct mbuf *m = txd->m;
3180 int status;
3181
3182 KASSERT(txd->done == 0, ("txd not done"));
3183 KASSERT(txd->in != NULL, ("txd without node"));
3184 KASSERT(txd->m != NULL, ("txd without mbuf"));
24a8d46a 3185
3186 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3187
3188 sc->sc_tx_timer = 0;
3189
77de6c2d 3190 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
24a8d46a
MD
3191
3192 /* Unmap and free mbuf. */
3193 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3194 bus_dmamap_unload(ring->data_dmat, txd->map);
3195
3196 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3197 "free txd %p, in %p\n", txd, txd->in);
24a8d46a 3198 txd->done = 1;
3199 txd->m = NULL;
3200 txd->in = NULL;
3201
3202 ieee80211_tx_complete(&in->in_ni, m, status);
3203
3204 if (--ring->queued < IWM_TX_RING_LOMARK) {
3205 sc->qfullmsk &= ~(1 << ring->qid);
77de6c2d 3206 if (sc->qfullmsk == 0) {
3207 /*
3208 * Well, we're in interrupt context, but then again
3209 * I guess net80211 does all sorts of stunts in
3210 * interrupt context, so maybe this is no biggie.
3211 */
77de6c2d 3212 iwm_start(sc);
3213 }
3214 }
3215}
3216
3217/*
3218 * transmit side
3219 */
3220
3221/*
3222 * Process a "command done" firmware notification. This is where we wakeup
3223 * processes waiting for a synchronous command completion.
3224 * from if_iwn
3225 */
3226static void
3227iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3228{
3229 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3230 struct iwm_tx_data *data;
3231
3232 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3233 return; /* Not a command ack. */
3234 }
3235
3236 data = &ring->data[pkt->hdr.idx];
3237
3238 /* If the command was mapped in an mbuf, free it. */
3239 if (data->m != NULL) {
3240 bus_dmamap_sync(ring->data_dmat, data->map,
3241 BUS_DMASYNC_POSTWRITE);
3242 bus_dmamap_unload(ring->data_dmat, data->map);
3243 m_freem(data->m);
3244 data->m = NULL;
3245 }
3246 wakeup(&ring->desc[pkt->hdr.idx]);
3247}
3248
3249#if 0
3250/*
3251 * necessary only for block ack mode
3252 */
3253void
3254iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3255 uint16_t len)
3256{
3257 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3258 uint16_t w_val;
3259
3260 scd_bc_tbl = sc->sched_dma.vaddr;
3261
3262 len += 8; /* magic numbers came naturally from paris */
3263 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3264 len = roundup(len, 4) / 4;
3265
3266 w_val = htole16(sta_id << 12 | len);
3267
3268 /* Update TX scheduler. */
3269 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3270 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3271 BUS_DMASYNC_PREWRITE);
3272
3273 /* I really wonder what this is ?!? */
3274 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3275 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3276 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3277 BUS_DMASYNC_PREWRITE);
3278 }
3279}
3280#endif
3281
3282/*
3283 * Take an 802.11 (non-n) rate, find the relevant rate
3284 * table entry. return the index into in_ridx[].
3285 *
3286 * The caller then uses that index back into in_ridx
3287 * to figure out the rate index programmed /into/
3288 * the firmware for this given node.
3289 */
3290static int
3291iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3292 uint8_t rate)
3293{
3294 int i;
3295 uint8_t r;
3296
3297 for (i = 0; i < nitems(in->in_ridx); i++) {
3298 r = iwm_rates[in->in_ridx[i]].rate;
3299 if (rate == r)
3300 return (i);
3301 }
3302 /* XXX Return the first */
3303 /* XXX TODO: have it return the /lowest/ */
3304 return (0);
3305}
3306
3307/*
77de6c2d 3308 * Fill in the rate related information for a transmit command.
3309 */
3310static const struct iwm_rate *
3311iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3312 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3313{
77de6c2d 3314 struct ieee80211com *ic = &sc->sc_ic;
3315 struct ieee80211_node *ni = &in->in_ni;
3316 const struct iwm_rate *rinfo;
3317 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3318 int ridx, rate_flags;
3319
3320 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3321 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3322
3323 /*
3324 * XXX TODO: everything about the rate selection here is terrible!
3325 */
3326
3327 if (type == IEEE80211_FC0_TYPE_DATA) {
3328 int i;
3329 /* for data frames, use RS table */
3330 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3331 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3332 ridx = in->in_ridx[i];
3333
3334 /* This is the index into the programmed table */
3335 tx->initial_rate_index = i;
3336 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3337 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3338 "%s: start with i=%d, txrate %d\n",
3339 __func__, i, iwm_rates[ridx].rate);
77de6c2d 3340 } else {
24a8d46a 3341 /*
3342 * For non-data, use the lowest supported rate for the given
3343 * operational mode.
3344 *
3345 * Note: there may not be any rate control information available.
3346 * This driver currently assumes if we're transmitting data
3347 * frames, use the rate control table. Grr.
3348 *
3349 * XXX TODO: use the configured rate for the traffic type!
3350 * XXX TODO: this should be per-vap, not curmode; as we later
3351 * on we'll want to handle off-channel stuff (eg TDLS).
24a8d46a 3352 */
3353 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3354 /*
3355 * XXX this assumes the mode is either 11a or not 11a;
3356 * definitely won't work for 11n.
3357 */
3358 ridx = IWM_RIDX_OFDM;
3359 } else {
3360 ridx = IWM_RIDX_CCK;
3361 }
3362 }
3363
3364 rinfo = &iwm_rates[ridx];
3365
3366 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3367 __func__, ridx,
3368 rinfo->rate,
3369 !! (IWM_RIDX_IS_CCK(ridx))
3370 );
3371
3372 /* XXX TODO: hard-coded TX antenna? */
3373 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3374 if (IWM_RIDX_IS_CCK(ridx))
3375 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3376 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3377
3378 return rinfo;
3379}
3380
3381#define TB0_SIZE 16
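/*
 * Map an mbuf onto a TX ring slot: fill the iwm_tx_cmd (rate
 * selection, flags, station id, scratch-area address), copy the
 * 802.11 header into the command itself, then DMA-map the (possibly
 * encrypted) payload.
 */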
3382static int
3383iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3384{
77de6c2d 3385 struct ieee80211com *ic = &sc->sc_ic;
24a8d46a 3386 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
77de6c2d 3387 struct iwm_node *in = IWM_NODE(ni);
3388 struct iwm_tx_ring *ring;
3389 struct iwm_tx_data *data;
3390 struct iwm_tfd *desc;
3391 struct iwm_device_cmd *cmd;
3392 struct iwm_tx_cmd *tx;
3393 struct ieee80211_frame *wh;
3394 struct ieee80211_key *k = NULL;
3395#if !defined(__DragonFly__)
3396 struct mbuf *m1;
3397#endif
3398 const struct iwm_rate *rinfo;
3399 uint32_t flags;
3400 u_int hdrlen;
3401 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3402 int nsegs;
3403 uint8_t tid, type;
3404 int i, totlen, error, pad;
3405
3406 wh = mtod(m, struct ieee80211_frame *);
3407 hdrlen = ieee80211_anyhdrsize(wh);
3408 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3409 tid = 0;
3410 ring = &sc->txq[ac];
3411 desc = &ring->desc[ring->cur];
3412 memset(desc, 0, sizeof(*desc));
3413 data = &ring->data[ring->cur];
3414
	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}

	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;
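	/*
	 * Multicast and non-data frames go out via the auxiliary station
	 * rather than the (single) associated-station context; presumably
	 * so they can be sent before/without a negotiated station entry.
	 */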

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;
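	/*
	 * E.g. a 3-address data header is 24 bytes, so no pad is needed,
	 * while a QoS data header is 26 bytes, giving pad = 2 to keep the
	 * first segment 32-bit aligned.
	 */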

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/*
	 * Trim the 802.11 header from the mbuf; it was copied into the TX
	 * command above, so only the payload needs to be DMA-mapped.
	 */
	m_adj(m, hdrlen);
#if defined(__DragonFly__)
	error = bus_dmamap_load_mbuf_defrag(ring->data_dmat, data->map, &m,
	    segs, IWM_MAX_SCATTER - 2,
	    &nsegs, BUS_DMA_NOWAIT);
#else
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
#endif
	if (error != 0) {
#if defined(__DragonFly__)
		device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
		    error);
		m_freem(m);
		return error;
#else
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
#endif
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x "
	    "rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + nsegs;
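
	/*
	 * Each transfer buffer entry packs a 36-bit DMA address and a
	 * 12-bit length: the low 4 bits of hi_n_len carry address bits
	 * 32-35 (from iwm_get_dma_hi_addr()) and the upper 12 bits carry
	 * the byte count, hence the << 4 shifts below.
	 */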
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	    + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}
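	/*
	 * The transmit start path is expected to consult qfullmsk and hold
	 * off queuing new frames until TX completions free ring slots again
	 * (the consumer side lives elsewhere in the driver).
	 */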

	return 0;
}

static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = ic->ic_softc;
	int error = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "->%s begin\n", __func__);

	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
		m_freem(m);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "<-%s not RUNNING\n", __func__);
		return (ENETDOWN);
	}

	IWM_LOCK(sc);
	/*
	 * XXX fix this: both branches are identical for now; the
	 * caller-supplied bpf params (rate, power, etc.) are not yet
	 * honored.
	 */
	if (params == NULL) {
		error = iwm_tx(sc, m, ni, 0);
	} else {
		error = iwm_tx(sc, m, ni, 0);
	}
	sc->sc_tx_timer = 5;
	IWM_UNLOCK(sc);

	return (error);
}

/*
 * mvm/tx.c
 */

#if 0
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "Flushing tx queue failed: %d\n", ret);
	return ret;
}
#endif
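
/*
 * Illustrative only: a minimal sketch of the race-free drain sequence
 * described in the comment above. iwm_mvm_drain_sta() is hypothetical
 * (no such helper exists in this driver), and the queue-empty wait is
 * approximated by polling the ring counters.
 */
#if 0
static int
iwm_mvm_drain_tx_path(struct iwm_softc *sc, struct iwm_node *in, int tfd_msk)
{
	int qid, ret;

	/* 1) mark the station as draining (hypothetical helper) */
	ret = iwm_mvm_drain_sta(sc, in, 1);
	if (ret)
		return ret;
	/* 2) flush the Tx path, synchronously */
	ret = iwm_mvm_flush_tx_path(sc, tfd_msk, 1);
	if (ret)
		return ret;
	/* 3) wait for the flushed transport queues to empty */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		while ((tfd_msk & (1 << qid)) && sc->txq[qid].queued > 0)
			DELAY(100);
	}
	return 0;
}
#endif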

static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
    struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
{
	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
	    cmd, status);
}

/* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
	int ret;
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
	        IWM_DEFAULT_COLOR));
	if (!update) {
		int ac;
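		/*
		 * On an initial add, enable every WME access category's
		 * TX FIFO for this station; the AC-to-FIFO mapping comes
		 * from iwm_mvm_ac_to_tx_fifo[].
		 */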
		for (ac = 0; ac < WME_NUM_AC; ac++) {
			add_sta_cmd.tfd_queue_msk |=
			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
		}
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	if (update)
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
		break;
	}

	return ret;
}

static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}

static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)