iwn - Lock iwn_timer_timeout callback.
sys/dev/netif/iwn/if_iwn.c (dragonfly.git)
1/*-
2 * Copyright (c) 2007-2009
3 * Damien Bergamini <damien.bergamini@free.fr>
4 * Copyright (c) 2008
5 * Benjamin Close <benjsc@FreeBSD.org>
6 * Copyright (c) 2008 Sam Leffler, Errno Consulting
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23 * adapters.
24 */
25
26/* $FreeBSD$ */
27
28#include <sys/param.h>
29#include <sys/sockio.h>
30#include <sys/sysctl.h>
31#include <sys/mbuf.h>
32#include <sys/kernel.h>
33#include <sys/socket.h>
34#include <sys/systm.h>
35#include <sys/malloc.h>
36#include <sys/bus.h>
37#include <sys/rman.h>
38#include <sys/endian.h>
39#include <sys/firmware.h>
40#include <sys/limits.h>
41#include <sys/module.h>
42#include <sys/queue.h>
43#include <sys/taskqueue.h>
44#include <sys/libkern.h>
45
46#include <sys/bus.h>
47#include <sys/resource.h>
48#include <machine/clock.h>
49
50#include <bus/pci/pcireg.h>
51#include <bus/pci/pcivar.h>
52
53#include <net/bpf.h>
54#include <net/if.h>
55#include <net/if_arp.h>
56#include <net/ifq_var.h>
57#include <net/ethernet.h>
58#include <net/if_dl.h>
59#include <net/if_media.h>
60#include <net/if_types.h>
61
62#include <netinet/in.h>
63#include <netinet/in_systm.h>
64#include <netinet/in_var.h>
65#include <netinet/if_ether.h>
66#include <netinet/ip.h>
67
68#include <netproto/802_11/ieee80211_var.h>
69#include <netproto/802_11/ieee80211_radiotap.h>
70#include <netproto/802_11/ieee80211_regdomain.h>
71#include <netproto/802_11/ieee80211_ratectl.h>
72
73#include "if_iwnreg.h"
74#include "if_iwnvar.h"
75
76static int iwn_probe(device_t);
77static int iwn_attach(device_t);
78static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *);
79static void iwn_radiotap_attach(struct iwn_softc *);
80static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
81 const char name[IFNAMSIZ], int unit, int opmode,
82 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
83 const uint8_t mac[IEEE80211_ADDR_LEN]);
84static void iwn_vap_delete(struct ieee80211vap *);
85static int iwn_cleanup(device_t);
86static int iwn_detach(device_t);
87static int iwn_nic_lock(struct iwn_softc *);
88static int iwn_eeprom_lock(struct iwn_softc *);
89static int iwn_init_otprom(struct iwn_softc *);
90static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
91static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
92static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
93 void **, bus_size_t, bus_size_t, int);
94static void iwn_dma_contig_free(struct iwn_dma_info *);
95static int iwn_alloc_sched(struct iwn_softc *);
96static void iwn_free_sched(struct iwn_softc *);
97static int iwn_alloc_kw(struct iwn_softc *);
98static void iwn_free_kw(struct iwn_softc *);
99static int iwn_alloc_ict(struct iwn_softc *);
100static void iwn_free_ict(struct iwn_softc *);
101static int iwn_alloc_fwmem(struct iwn_softc *);
102static void iwn_free_fwmem(struct iwn_softc *);
103static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
104static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
105static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
106static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
107 int);
108static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
109static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
110static void iwn5000_ict_reset(struct iwn_softc *);
111static int iwn_read_eeprom(struct iwn_softc *,
112 uint8_t macaddr[IEEE80211_ADDR_LEN]);
113static void iwn4965_read_eeprom(struct iwn_softc *);
114static void iwn4965_print_power_group(struct iwn_softc *, int);
115static void iwn5000_read_eeprom(struct iwn_softc *);
116static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
117static void iwn_read_eeprom_band(struct iwn_softc *, int);
118#if 0 /* HT */
119static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
120#endif
121static void iwn_read_eeprom_channels(struct iwn_softc *, int,
122 uint32_t);
123static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
124static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
125 const uint8_t mac[IEEE80211_ADDR_LEN]);
126static void iwn_newassoc(struct ieee80211_node *, int);
127static int iwn_media_change(struct ifnet *);
128static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
129static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
130 struct iwn_rx_data *);
131static void iwn_timer_timeout(void *);
132static void iwn_calib_reset(struct iwn_softc *);
133static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
134 struct iwn_rx_data *);
135#if 0 /* HT */
136static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
137 struct iwn_rx_data *);
138#endif
139static void iwn5000_rx_calib_results(struct iwn_softc *,
140 struct iwn_rx_desc *, struct iwn_rx_data *);
141static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
142 struct iwn_rx_data *);
143static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
144 struct iwn_rx_data *);
145static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
146 struct iwn_rx_data *);
147static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
148 uint8_t);
149static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
150static void iwn_notif_intr(struct iwn_softc *);
151static void iwn_wakeup_intr(struct iwn_softc *);
152static void iwn_rftoggle_intr(struct iwn_softc *);
153static void iwn_fatal_intr(struct iwn_softc *);
154static void iwn_intr(void *);
155static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
156 uint16_t);
157static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
158 uint16_t);
159#ifdef notyet
160static void iwn5000_reset_sched(struct iwn_softc *, int, int);
161#endif
162static uint8_t iwn_plcp_signal(int);
163static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
164 struct ieee80211_node *, struct iwn_tx_ring *);
165static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
166 const struct ieee80211_bpf_params *);
167static void iwn_start(struct ifnet *);
168static void iwn_start_locked(struct ifnet *);
169static void iwn_watchdog(struct iwn_softc *sc);
170static int iwn_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
171static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
172static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
173 int);
174static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
175 int);
176static int iwn_set_link_quality(struct iwn_softc *, uint8_t, int);
177static int iwn_add_broadcast_node(struct iwn_softc *, int);
178static int iwn_wme_update(struct ieee80211com *);
179static void iwn_update_mcast(struct ifnet *);
180static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
181static int iwn_set_critical_temp(struct iwn_softc *);
182static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
183static void iwn4965_power_calibration(struct iwn_softc *, int);
184static int iwn4965_set_txpower(struct iwn_softc *,
185 struct ieee80211_channel *, int);
186static int iwn5000_set_txpower(struct iwn_softc *,
187 struct ieee80211_channel *, int);
188static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
189static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
190static int iwn_get_noise(const struct iwn_rx_general_stats *);
191static int iwn4965_get_temperature(struct iwn_softc *);
192static int iwn5000_get_temperature(struct iwn_softc *);
193static int iwn_init_sensitivity(struct iwn_softc *);
194static void iwn_collect_noise(struct iwn_softc *,
195 const struct iwn_rx_general_stats *);
196static int iwn4965_init_gains(struct iwn_softc *);
197static int iwn5000_init_gains(struct iwn_softc *);
198static int iwn4965_set_gains(struct iwn_softc *);
199static int iwn5000_set_gains(struct iwn_softc *);
200static void iwn_tune_sensitivity(struct iwn_softc *,
201 const struct iwn_rx_stats *);
202static int iwn_send_sensitivity(struct iwn_softc *);
203static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
204static int iwn_config(struct iwn_softc *);
205static int iwn_scan(struct iwn_softc *);
206static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
207static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
208#if 0 /* HT */
209static int iwn_ampdu_rx_start(struct ieee80211com *,
210 struct ieee80211_node *, uint8_t);
211static void iwn_ampdu_rx_stop(struct ieee80211com *,
212 struct ieee80211_node *, uint8_t);
213static int iwn_ampdu_tx_start(struct ieee80211com *,
214 struct ieee80211_node *, uint8_t);
215static void iwn_ampdu_tx_stop(struct ieee80211com *,
216 struct ieee80211_node *, uint8_t);
217static void iwn4965_ampdu_tx_start(struct iwn_softc *,
218 struct ieee80211_node *, uint8_t, uint16_t);
219static void iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
220static void iwn5000_ampdu_tx_start(struct iwn_softc *,
221 struct ieee80211_node *, uint8_t, uint16_t);
222static void iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
223#endif
224static int iwn5000_query_calibration(struct iwn_softc *);
225static int iwn5000_send_calibration(struct iwn_softc *);
226static int iwn5000_send_wimax_coex(struct iwn_softc *);
227static int iwn4965_post_alive(struct iwn_softc *);
228static int iwn5000_post_alive(struct iwn_softc *);
229static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
230 int);
231static int iwn4965_load_firmware(struct iwn_softc *);
232static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
233 const uint8_t *, int);
234static int iwn5000_load_firmware(struct iwn_softc *);
235static int iwn_read_firmware(struct iwn_softc *);
236static int iwn_clock_wait(struct iwn_softc *);
237static int iwn_apm_init(struct iwn_softc *);
238static void iwn_apm_stop_master(struct iwn_softc *);
239static void iwn_apm_stop(struct iwn_softc *);
240static int iwn4965_nic_config(struct iwn_softc *);
241static int iwn5000_nic_config(struct iwn_softc *);
242static int iwn_hw_prepare(struct iwn_softc *);
243static int iwn_hw_init(struct iwn_softc *);
244static void iwn_hw_stop(struct iwn_softc *);
245static void iwn_init_locked(struct iwn_softc *);
246static void iwn_init(void *);
247static void iwn_stop_locked(struct iwn_softc *);
248static void iwn_stop(struct iwn_softc *);
249static void iwn_scan_start(struct ieee80211com *);
250static void iwn_scan_end(struct ieee80211com *);
251static void iwn_set_channel(struct ieee80211com *);
252static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
253static void iwn_scan_mindwell(struct ieee80211_scan_state *);
254static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
255 struct ieee80211_channel *);
256static int iwn_setregdomain(struct ieee80211com *,
257 struct ieee80211_regdomain *, int,
258 struct ieee80211_channel []);
259static void iwn_hw_reset(void *, int);
260static void iwn_radio_on(void *, int);
261static void iwn_radio_off(void *, int);
262static void iwn_sysctlattach(struct iwn_softc *);
263static int iwn_shutdown(device_t);
264static int iwn_suspend(device_t);
265static int iwn_resume(device_t);
266
267#define IWN_DEBUG
268#ifdef IWN_DEBUG
269enum {
270 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
271 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */
272 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
273 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */
274 IWN_DEBUG_RESET = 0x00000010, /* reset processing */
275 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */
276 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */
277 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
278 IWN_DEBUG_INTR = 0x00000100, /* ISR */
279 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
280 IWN_DEBUG_NODE = 0x00000400, /* node management */
281 IWN_DEBUG_LED = 0x00000800, /* led management */
282 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */
283 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */
284 IWN_DEBUG_ANY = 0xffffffff
285};
286
287#define DPRINTF(sc, m, fmt, ...) do { \
288 if (sc->sc_debug & (m)) \
289 kprintf(fmt, __VA_ARGS__); \
290} while (0)
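/*
 * Usage sketch (mirroring calls elsewhere in this file): the second
 * argument selects one of the IWN_DEBUG_* categories above, e.g.
 *
 *	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
 *
 * and output is produced only when the matching bit is set in sc->sc_debug.
 */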
291
292static const char *iwn_intr_str(uint8_t);
293#else
294#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
295#endif
296
297struct iwn_ident {
298 uint16_t vendor;
299 uint16_t device;
300 const char *name;
301};
302
303static const struct iwn_ident iwn_ident_table [] = {
304 { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" },
305 { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" },
306 { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" },
307 { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" },
308 { 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" },
309 { 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" },
310 { 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" },
311 { 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" },
312 { 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" },
313 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" },
314 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5350" },
315 { 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" },
316 { 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" },
317 { 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" },
318 { 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" },
319 { 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" },
320 { 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" },
321 { 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" },
322 { 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" },
323 { 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" },
324 { 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" },
325 { 0x8086, 0x0086, "Intel(R) PRO/Wireless 6050" },
326 { 0x8086, 0x0087, "Intel(R) PRO/Wireless 6050" },
327 { 0, 0, NULL }
328};
329
330static const struct iwn_hal iwn4965_hal = {
331 iwn4965_load_firmware,
332 iwn4965_read_eeprom,
333 iwn4965_post_alive,
334 iwn4965_nic_config,
335 iwn4965_update_sched,
336 iwn4965_get_temperature,
337 iwn4965_get_rssi,
338 iwn4965_set_txpower,
339 iwn4965_init_gains,
340 iwn4965_set_gains,
341 iwn4965_add_node,
342 iwn4965_tx_done,
343#if 0 /* HT */
344 iwn4965_ampdu_tx_start,
345 iwn4965_ampdu_tx_stop,
346#endif
347 IWN4965_NTXQUEUES,
348 IWN4965_NDMACHNLS,
349 IWN4965_ID_BROADCAST,
350 IWN4965_RXONSZ,
351 IWN4965_SCHEDSZ,
352 IWN4965_FW_TEXT_MAXSZ,
353 IWN4965_FW_DATA_MAXSZ,
354 IWN4965_FWSZ,
355 IWN4965_SCHED_TXFACT
356};
357
358static const struct iwn_hal iwn5000_hal = {
359 iwn5000_load_firmware,
360 iwn5000_read_eeprom,
361 iwn5000_post_alive,
362 iwn5000_nic_config,
363 iwn5000_update_sched,
364 iwn5000_get_temperature,
365 iwn5000_get_rssi,
366 iwn5000_set_txpower,
367 iwn5000_init_gains,
368 iwn5000_set_gains,
369 iwn5000_add_node,
370 iwn5000_tx_done,
371#if 0 /* HT */
372 iwn5000_ampdu_tx_start,
373 iwn5000_ampdu_tx_stop,
374#endif
375 IWN5000_NTXQUEUES,
376 IWN5000_NDMACHNLS,
377 IWN5000_ID_BROADCAST,
378 IWN5000_RXONSZ,
379 IWN5000_SCHEDSZ,
380 IWN5000_FW_TEXT_MAXSZ,
381 IWN5000_FW_DATA_MAXSZ,
382 IWN5000_FWSZ,
383 IWN5000_SCHED_TXFACT
384};
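/*
 * Each table above supplies the chip-specific operations (firmware load,
 * EEPROM read, post-alive setup, TX scheduler update, temperature/RSSI/TX
 * power handling, ...) together with queue counts and buffer sizes for one
 * hardware family; iwn_hal_attach() below selects one of them based on the
 * revision read from IWN_HW_REV.
 */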
385
386static int
387iwn_probe(device_t dev)
388{
389 const struct iwn_ident *ident;
390
391 for (ident = iwn_ident_table; ident->name != NULL; ident++) {
392 if (pci_get_vendor(dev) == ident->vendor &&
393 pci_get_device(dev) == ident->device) {
394 device_set_desc(dev, ident->name);
395 return 0;
396 }
397 }
398 return ENXIO;
399}
400
401static int
402iwn_attach(device_t dev)
403{
404 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
405 struct ieee80211com *ic;
406 struct ifnet *ifp;
407 const struct iwn_hal *hal;
408 uint32_t tmp;
409 int i, error, result;
410 uint8_t macaddr[IEEE80211_ADDR_LEN];
411
412 sc->sc_dev = dev;
413 sc->sc_dmat = NULL;
414
415 if (bus_dma_tag_create(sc->sc_dmat,
416 1, 0,
417 BUS_SPACE_MAXADDR_32BIT,
418 BUS_SPACE_MAXADDR,
419 NULL, NULL,
420 BUS_SPACE_MAXSIZE,
421 IWN_MAX_SCATTER,
422 BUS_SPACE_MAXSIZE,
423 BUS_DMA_ALLOCNOW,
424 &sc->sc_dmat)) {
425 device_printf(dev, "cannot allocate DMA tag\n");
426 error = ENOMEM;
427 goto fail;
428 }
429
430
431
432 /* prepare sysctl tree for use in sub modules */
433 sysctl_ctx_init(&sc->sc_sysctl_ctx);
434 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
435 SYSCTL_STATIC_CHILDREN(_hw),
436 OID_AUTO,
437 device_get_nameunit(sc->sc_dev),
438 CTLFLAG_RD, 0, "");
439
440 /*
441 * Get the offset of the PCI Express Capability Structure in PCI
442 * Configuration Space.
443 */
444 error = pci_find_extcap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
445 if (error != 0) {
446 device_printf(dev, "PCIe capability structure not found!\n");
447 return error;
448 }
449
450 /* Clear device-specific "PCI retry timeout" register (41h). */
451 pci_write_config(dev, 0x41, 0, 1);
452
453 /* Hardware bug workaround. */
454 tmp = pci_read_config(dev, PCIR_COMMAND, 1);
455 if (tmp & PCIM_CMD_INTxDIS) {
456 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
457 __func__);
458 tmp &= ~PCIM_CMD_INTxDIS;
459 pci_write_config(dev, PCIR_COMMAND, tmp, 1);
460 }
461
462 /* Enable bus-mastering. */
463 pci_enable_busmaster(dev);
464
465 sc->mem_rid = PCIR_BAR(0);
466 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
467 RF_ACTIVE);
468 if (sc->mem == NULL) {
469 device_printf(dev, "could not allocate memory resources\n");
470 error = ENOMEM;
471 return error;
472 }
473
474 sc->sc_st = rman_get_bustag(sc->mem);
475 sc->sc_sh = rman_get_bushandle(sc->mem);
476 sc->irq_rid = 0;
477 if ((result = pci_msi_count(dev)) == 1 &&
478 pci_alloc_msi(dev, &result) == 0)
479 sc->irq_rid = 1;
480 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
481 RF_ACTIVE | RF_SHAREABLE);
482 if (sc->irq == NULL) {
483 device_printf(dev, "could not allocate interrupt resource\n");
484 error = ENOMEM;
485 goto fail;
486 }
487
488 IWN_LOCK_INIT(sc);
489 callout_init(&sc->sc_timer_to);
490 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
491 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
492 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
493
494 /* Attach Hardware Abstraction Layer. */
495 hal = iwn_hal_attach(sc);
496 if (hal == NULL) {
497 error = ENXIO; /* XXX: Wrong error code? */
498 goto fail;
499 }
500
501 error = iwn_hw_prepare(sc);
502 if (error != 0) {
503 device_printf(dev, "hardware not ready, error %d\n", error);
504 goto fail;
505 }
506
507 /* Allocate DMA memory for firmware transfers. */
508 error = iwn_alloc_fwmem(sc);
509 if (error != 0) {
510 device_printf(dev,
511 "could not allocate memory for firmware, error %d\n",
512 error);
513 goto fail;
514 }
515
516 /* Allocate "Keep Warm" page. */
517 error = iwn_alloc_kw(sc);
518 if (error != 0) {
519 device_printf(dev,
520 "could not allocate \"Keep Warm\" page, error %d\n", error);
521 goto fail;
522 }
523
524 /* Allocate ICT table for 5000 Series. */
525 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
526 (error = iwn_alloc_ict(sc)) != 0) {
527 device_printf(dev,
528 "%s: could not allocate ICT table, error %d\n",
529 __func__, error);
530 goto fail;
531 }
532
533 /* Allocate TX scheduler "rings". */
534 error = iwn_alloc_sched(sc);
535 if (error != 0) {
536 device_printf(dev,
537 "could not allocate TX scheduler rings, error %d\n",
538 error);
539 goto fail;
540 }
541
542 /* Allocate TX rings (16 on 4965AGN, 20 on 5000). */
543 for (i = 0; i < hal->ntxqs; i++) {
544 error = iwn_alloc_tx_ring(sc, &sc->txq[i], i);
545 if (error != 0) {
546 device_printf(dev,
547 "could not allocate Tx ring %d, error %d\n",
548 i, error);
549 goto fail;
550 }
551 }
552
553 /* Allocate RX ring. */
554 error = iwn_alloc_rx_ring(sc, &sc->rxq);
555 if (error != 0) {
556 device_printf(dev,
557 "could not allocate Rx ring, error %d\n", error);
558 goto fail;
559 }
560
561 /* Clear pending interrupts. */
562 IWN_WRITE(sc, IWN_INT, 0xffffffff);
563
564 /* Count the number of available chains. */
565 sc->ntxchains =
566 ((sc->txchainmask >> 2) & 1) +
567 ((sc->txchainmask >> 1) & 1) +
568 ((sc->txchainmask >> 0) & 1);
569 sc->nrxchains =
570 ((sc->rxchainmask >> 2) & 1) +
571 ((sc->rxchainmask >> 1) & 1) +
572 ((sc->rxchainmask >> 0) & 1);
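	/*
	 * Note: the three low-order bits of each chain mask are assumed to
	 * correspond to antennas A, B and C (cf. the IWN_ANT_A/B/C masks
	 * assigned in iwn_hal_attach()), so the sums above count the
	 * connected TX and RX chains.
	 */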
573
574 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
575 if (ifp == NULL) {
576 device_printf(dev, "can not allocate ifnet structure\n");
577 goto fail;
578 }
579 ic = ifp->if_l2com;
580
581 ic->ic_ifp = ifp;
582 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
583 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
584
585 /* Set device capabilities. */
586 ic->ic_caps =
587 IEEE80211_C_STA /* station mode supported */
588 | IEEE80211_C_MONITOR /* monitor mode supported */
589 | IEEE80211_C_TXPMGT /* tx power management */
590 | IEEE80211_C_SHSLOT /* short slot time supported */
591 | IEEE80211_C_WPA
592 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
593 | IEEE80211_C_BGSCAN /* background scanning */
594#if 0
595 | IEEE80211_C_IBSS /* ibss/adhoc mode */
596#endif
597 | IEEE80211_C_WME /* WME */
598 ;
599#if 0 /* HT */
600 /* XXX disable until HT channel setup works */
601 ic->ic_htcaps =
602 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
603 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
604 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
605 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
606 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
607 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
608 /* s/w capabilities */
609 | IEEE80211_HTC_HT /* HT operation */
610 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
611 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
612 ;
613
614 /* Set HT capabilities. */
615 ic->ic_htcaps =
616#if IWN_RBUF_SIZE == 8192
617 IEEE80211_HTCAP_AMSDU7935 |
618#endif
619 IEEE80211_HTCAP_CBW20_40 |
620 IEEE80211_HTCAP_SGI20 |
621 IEEE80211_HTCAP_SGI40;
622 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
623 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
624 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
625 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
626 else
627 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
628#endif
629
630 /* Read MAC address, channels, etc from EEPROM. */
631 error = iwn_read_eeprom(sc, macaddr);
632 if (error != 0) {
633 device_printf(dev, "could not read EEPROM, error %d\n",
634 error);
635 goto fail;
636 }
637
638 device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n",
639 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
640 macaddr, ":");
641
642#if 0 /* HT */
643 /* Set supported HT rates. */
644 ic->ic_sup_mcs[0] = 0xff;
645 if (sc->nrxchains > 1)
646 ic->ic_sup_mcs[1] = 0xff;
647 if (sc->nrxchains > 2)
648 ic->ic_sup_mcs[2] = 0xff;
649#endif
650
651 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
652 ifp->if_softc = sc;
653 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
654 ifp->if_init = iwn_init;
655 ifp->if_ioctl = iwn_ioctl;
656 ifp->if_start = iwn_start;
657 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
658 ifq_set_ready(&ifp->if_snd);
659
660 ieee80211_ifattach(ic, macaddr);
661 ic->ic_vap_create = iwn_vap_create;
662 ic->ic_vap_delete = iwn_vap_delete;
663 ic->ic_raw_xmit = iwn_raw_xmit;
664 ic->ic_node_alloc = iwn_node_alloc;
665 ic->ic_newassoc = iwn_newassoc;
666 ic->ic_wme.wme_update = iwn_wme_update;
667 ic->ic_update_mcast = iwn_update_mcast;
668 ic->ic_scan_start = iwn_scan_start;
669 ic->ic_scan_end = iwn_scan_end;
670 ic->ic_set_channel = iwn_set_channel;
671 ic->ic_scan_curchan = iwn_scan_curchan;
672 ic->ic_scan_mindwell = iwn_scan_mindwell;
673 ic->ic_setregdomain = iwn_setregdomain;
674#if 0 /* HT */
675 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
676 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
677 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
678 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
679#endif
680
681 iwn_radiotap_attach(sc);
682 iwn_sysctlattach(sc);
683
684 /*
685 * Hook our interrupt after all initialization is complete.
686 */
687 error = bus_setup_intr(dev, sc->irq, INTR_MPSAFE,
688 iwn_intr, sc, &sc->sc_ih, ifp->if_serializer);
689 if (error != 0) {
690 device_printf(dev, "could not set up interrupt, error %d\n",
691 error);
692 goto fail;
693 }
694
695 ieee80211_announce(ic);
696 return 0;
697fail:
698 iwn_cleanup(dev);
699 return error;
700}
701
702static const struct iwn_hal *
703iwn_hal_attach(struct iwn_softc *sc)
704{
705 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
706
707 switch (sc->hw_type) {
708 case IWN_HW_REV_TYPE_4965:
709 sc->sc_hal = &iwn4965_hal;
710 sc->limits = &iwn4965_sensitivity_limits;
711 sc->fwname = "iwn4965fw";
712 sc->txchainmask = IWN_ANT_AB;
713 sc->rxchainmask = IWN_ANT_ABC;
714 break;
715 case IWN_HW_REV_TYPE_5100:
716 sc->sc_hal = &iwn5000_hal;
717 sc->limits = &iwn5000_sensitivity_limits;
718 sc->fwname = "iwn5000fw";
719 sc->txchainmask = IWN_ANT_B;
720 sc->rxchainmask = IWN_ANT_AB;
721 break;
722 case IWN_HW_REV_TYPE_5150:
723 sc->sc_hal = &iwn5000_hal;
724 sc->limits = &iwn5150_sensitivity_limits;
725 sc->fwname = "iwn5150fw";
726 sc->txchainmask = IWN_ANT_A;
727 sc->rxchainmask = IWN_ANT_AB;
728 break;
729 case IWN_HW_REV_TYPE_5300:
730 case IWN_HW_REV_TYPE_5350:
731 sc->sc_hal = &iwn5000_hal;
732 sc->limits = &iwn5000_sensitivity_limits;
733 sc->fwname = "iwn5000fw";
734 sc->txchainmask = IWN_ANT_ABC;
735 sc->rxchainmask = IWN_ANT_ABC;
736 break;
737 case IWN_HW_REV_TYPE_1000:
738 sc->sc_hal = &iwn5000_hal;
739 sc->limits = &iwn1000_sensitivity_limits;
740 sc->fwname = "iwn1000fw";
741 sc->txchainmask = IWN_ANT_A;
742 sc->rxchainmask = IWN_ANT_AB;
743 break;
744 case IWN_HW_REV_TYPE_6000:
745 sc->sc_hal = &iwn5000_hal;
746 sc->limits = &iwn6000_sensitivity_limits;
747 sc->fwname = "iwn6000fw";
748 switch (pci_get_device(sc->sc_dev)) {
749 case 0x422C:
750 case 0x4239:
751 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
752 sc->txchainmask = IWN_ANT_BC;
753 sc->rxchainmask = IWN_ANT_BC;
754 break;
755 default:
756 sc->txchainmask = IWN_ANT_ABC;
757 sc->rxchainmask = IWN_ANT_ABC;
758 break;
759 }
760 break;
761 case IWN_HW_REV_TYPE_6050:
762 sc->sc_hal = &iwn5000_hal;
763 sc->limits = &iwn6000_sensitivity_limits;
764 sc->fwname = "iwn6000fw";
765 sc->txchainmask = IWN_ANT_AB;
766 sc->rxchainmask = IWN_ANT_AB;
767 break;
768 default:
769 device_printf(sc->sc_dev, "adapter type %d not supported\n",
770 sc->hw_type);
771 return NULL;
772 }
773 return sc->sc_hal;
774}
775
776/*
777 * Attach the interface to 802.11 radiotap.
778 */
779static void
780iwn_radiotap_attach(struct iwn_softc *sc)
781{
782 struct ifnet *ifp = sc->sc_ifp;
783 struct ieee80211com *ic = ifp->if_l2com;
784
785 ieee80211_radiotap_attach(ic,
786 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
787 IWN_TX_RADIOTAP_PRESENT,
788 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
789 IWN_RX_RADIOTAP_PRESENT);
790}
791
792static struct ieee80211vap *
793iwn_vap_create(struct ieee80211com *ic,
794 const char name[IFNAMSIZ], int unit, int opmode, int flags,
795 const uint8_t bssid[IEEE80211_ADDR_LEN],
796 const uint8_t mac[IEEE80211_ADDR_LEN])
797{
798 struct iwn_vap *ivp;
799 struct ieee80211vap *vap;
800
801 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
802 return NULL;
803 ivp = (struct iwn_vap *) kmalloc(sizeof(struct iwn_vap),
804 M_80211_VAP, M_INTWAIT | M_ZERO);
805 if (ivp == NULL)
806 return NULL;
807 vap = &ivp->iv_vap;
808 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
809 vap->iv_bmissthreshold = 10; /* override default */
810 /* Override with driver methods. */
811 ivp->iv_newstate = vap->iv_newstate;
812 vap->iv_newstate = iwn_newstate;
813
814 ieee80211_ratectl_init(vap);
815 /* Complete setup. */
816 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
817 ic->ic_opmode = opmode;
818 return vap;
819}
820
821static void
822iwn_vap_delete(struct ieee80211vap *vap)
823{
824 struct iwn_vap *ivp = IWN_VAP(vap);
825
826 ieee80211_ratectl_deinit(vap);
827 ieee80211_vap_detach(vap);
828 kfree(ivp, M_80211_VAP);
829}
830
831static int
832iwn_cleanup(device_t dev)
833{
834 struct iwn_softc *sc = device_get_softc(dev);
835 struct ifnet *ifp = sc->sc_ifp;
836 struct ieee80211com *ic;
837 int i;
838
839 if (ifp != NULL) {
840 ic = ifp->if_l2com;
841
842 ieee80211_draintask(ic, &sc->sc_reinit_task);
843 ieee80211_draintask(ic, &sc->sc_radioon_task);
844 ieee80211_draintask(ic, &sc->sc_radiooff_task);
845
846 iwn_stop(sc);
847 callout_stop(&sc->sc_timer_to);
848 ieee80211_ifdetach(ic);
849 }
850
851 /* Free DMA resources. */
852 iwn_free_rx_ring(sc, &sc->rxq);
853 if (sc->sc_hal != NULL)
854 for (i = 0; i < sc->sc_hal->ntxqs; i++)
855 iwn_free_tx_ring(sc, &sc->txq[i]);
856 iwn_free_sched(sc);
857 iwn_free_kw(sc);
858 if (sc->ict != NULL)
859 iwn_free_ict(sc);
860 iwn_free_fwmem(sc);
861
862 if (sc->irq != NULL) {
863 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
864 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
865 if (sc->irq_rid == 1)
866 pci_release_msi(dev);
867 }
868
869 if (sc->mem != NULL)
870 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
871
872 if (ifp != NULL)
873 if_free(ifp);
874
875 IWN_LOCK_DESTROY(sc);
876 return 0;
877}
878
879static int
880iwn_detach(device_t dev)
881{
882 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
883
884 iwn_cleanup(dev);
885 bus_dma_tag_destroy(sc->sc_dmat);
886 return 0;
887}
888
889static int
890iwn_nic_lock(struct iwn_softc *sc)
891{
892 int ntries;
893
894 /* Request exclusive access to NIC. */
895 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
896
897 /* Spin until we actually get the lock. */
898 for (ntries = 0; ntries < 1000; ntries++) {
899 if ((IWN_READ(sc, IWN_GP_CNTRL) &
900 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
901 IWN_GP_CNTRL_MAC_ACCESS_ENA)
902 return 0;
903 DELAY(10);
904 }
905 return ETIMEDOUT;
906}
907
908static __inline void
909iwn_nic_unlock(struct iwn_softc *sc)
910{
911 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
912}
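/*
 * Callers bracket the PRPH and device-memory accessors below with
 * iwn_nic_lock()/iwn_nic_unlock(); a minimal sketch (mirroring
 * iwn_init_otprom()):
 *
 *	if ((error = iwn_nic_lock(sc)) != 0)
 *		return error;
 *	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
 *	iwn_nic_unlock(sc);
 */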
913
914static __inline uint32_t
915iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
916{
917 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
918 IWN_BARRIER_READ_WRITE(sc);
919 return IWN_READ(sc, IWN_PRPH_RDATA);
920}
921
922static __inline void
923iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
924{
925 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
926 IWN_BARRIER_WRITE(sc);
927 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
928}
929
930static __inline void
931iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
932{
933 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
934}
935
936static __inline void
937iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
938{
939 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
940}
941
942static __inline void
943iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
944 const uint32_t *data, int count)
945{
946 for (; count > 0; count--, data++, addr += 4)
947 iwn_prph_write(sc, addr, *data);
948}
949
950static __inline uint32_t
951iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
952{
953 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
954 IWN_BARRIER_READ_WRITE(sc);
955 return IWN_READ(sc, IWN_MEM_RDATA);
956}
957
958static __inline void
959iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
960{
961 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
962 IWN_BARRIER_WRITE(sc);
963 IWN_WRITE(sc, IWN_MEM_WDATA, data);
964}
965
966static __inline void
967iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
968{
969 uint32_t tmp;
970
971 tmp = iwn_mem_read(sc, addr & ~3);
972 if (addr & 3)
973 tmp = (tmp & 0x0000ffff) | data << 16;
974 else
975 tmp = (tmp & 0xffff0000) | data;
976 iwn_mem_write(sc, addr & ~3, tmp);
977}
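/*
 * Worked example for the halfword merge above: with an unaligned addr
 * ((addr & 3) != 0) the low 16 bits of the containing 32-bit word are
 * preserved and "data" lands in the upper 16 bits; with an aligned addr
 * the upper 16 bits are preserved and "data" replaces the lower half.
 */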
978
979static __inline void
980iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
981 int count)
982{
983 for (; count > 0; count--, addr += 4)
984 *data++ = iwn_mem_read(sc, addr);
985}
986
987static __inline void
988iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
989 int count)
990{
991 for (; count > 0; count--, addr += 4)
992 iwn_mem_write(sc, addr, val);
993}
994
995static int
996iwn_eeprom_lock(struct iwn_softc *sc)
997{
998 int i, ntries;
999
1000 for (i = 0; i < 100; i++) {
1001 /* Request exclusive access to EEPROM. */
1002 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1003 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1004
1005 /* Spin until we actually get the lock. */
1006 for (ntries = 0; ntries < 100; ntries++) {
1007 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1008 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1009 return 0;
1010 DELAY(10);
1011 }
1012 }
1013 return ETIMEDOUT;
1014}
1015
1016static __inline void
1017iwn_eeprom_unlock(struct iwn_softc *sc)
1018{
1019 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1020}
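/*
 * As with the NIC lock, ROM access is bracketed by this pair: see
 * iwn_read_eeprom(), which takes the lock, reads the ROM (via
 * iwn_init_otprom() first on OTP parts) and then calls iwn_eeprom_unlock().
 */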
1021
1022/*
1023 * Initialize access by host to One Time Programmable ROM.
1024 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1025 */
1026static int
1027iwn_init_otprom(struct iwn_softc *sc)
1028{
1029 uint16_t prev, base, next;
1030 int count, error;
1031
1032 /* Wait for clock stabilization before accessing prph. */
1033 error = iwn_clock_wait(sc);
1034 if (error != 0)
1035 return error;
1036
1037 error = iwn_nic_lock(sc);
1038 if (error != 0)
1039 return error;
1040 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1041 DELAY(5);
1042 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1043 iwn_nic_unlock(sc);
1044
1045 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1046 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1047 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1048 IWN_RESET_LINK_PWR_MGMT_DIS);
1049 }
1050 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1051 /* Clear ECC status. */
1052 IWN_SETBITS(sc, IWN_OTP_GP,
1053 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1054
1055 /*
1056 * Find the block before last block (contains the EEPROM image)
1057 * for HW without OTP shadow RAM.
1058 */
1059 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1060 /* Switch to absolute addressing mode. */
1061 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1062 base = prev = 0;
1063 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1064 error = iwn_read_prom_data(sc, base, &next, 2);
1065 if (error != 0)
1066 return error;
1067 if (next == 0) /* End of linked-list. */
1068 break;
1069 prev = base;
1070 base = le16toh(next);
1071 }
1072 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1073 return EIO;
1074 /* Skip "next" word. */
1075 sc->prom_base = prev + 1;
1076 }
1077 return 0;
1078}
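/*
 * The traversal above assumes each OTP block starts with a 16-bit
 * little-endian link to the next block: the walk stops at a zero link
 * (end of list), and sc->prom_base is set one word past the start of the
 * next-to-last block, i.e. just after its link word, where the EEPROM
 * image is expected to begin.
 */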
1079
1080static int
1081iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1082{
1083 uint32_t val, tmp;
1084 int ntries;
1085 uint8_t *out = data;
1086
1087 addr += sc->prom_base;
1088 for (; count > 0; count -= 2, addr++) {
1089 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1090 for (ntries = 0; ntries < 10; ntries++) {
1091 val = IWN_READ(sc, IWN_EEPROM);
1092 if (val & IWN_EEPROM_READ_VALID)
1093 break;
1094 DELAY(5);
1095 }
1096 if (ntries == 10) {
1097 device_printf(sc->sc_dev,
1098 "timeout reading ROM at 0x%x\n", addr);
1099 return ETIMEDOUT;
1100 }
1101 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1102 /* OTPROM, check for ECC errors. */
1103 tmp = IWN_READ(sc, IWN_OTP_GP);
1104 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1105 device_printf(sc->sc_dev,
1106 "OTPROM ECC error at 0x%x\n", addr);
1107 return EIO;
1108 }
1109 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1110 /* Correctable ECC error, clear bit. */
1111 IWN_SETBITS(sc, IWN_OTP_GP,
1112 IWN_OTP_GP_ECC_CORR_STTS);
1113 }
1114 }
1115 *out++ = val >> 16;
1116 if (count > 1)
1117 *out++ = val >> 24;
1118 }
1119 return 0;
1120}
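/*
 * Note on addressing: "count" is in bytes while the ROM is read one
 * 16-bit word per iteration, hence the "count -= 2, addr++" step; the
 * datum comes back in bits 16-31 of the IWN_EEPROM register, which is why
 * the two output bytes are extracted with "val >> 16" and "val >> 24".
 */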
1121
1122static void
1123iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1124{
1125 if (error != 0)
1126 return;
1127 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1128 *(bus_addr_t *)arg = segs[0].ds_addr;
1129}
1130
1131static int
1132iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1133 void **kvap, bus_size_t size, bus_size_t alignment, int flags)
1134{
1135 int error;
1136
1137 dma->size = size;
1138 dma->tag = NULL;
1139
1140 error = bus_dma_tag_create(sc->sc_dmat, alignment,
1141 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1142 1, size, flags, &dma->tag);
1143 if (error != 0) {
1144 device_printf(sc->sc_dev,
1145 "%s: bus_dma_tag_create failed, error %d\n",
1146 __func__, error);
1147 goto fail;
1148 }
1149 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1150 flags | BUS_DMA_ZERO, &dma->map);
1151 if (error != 0) {
1152 device_printf(sc->sc_dev,
1153 "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
1154 goto fail;
1155 }
1156 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
1157 size, iwn_dma_map_addr, &dma->paddr, flags);
1158 if (error != 0) {
1159 device_printf(sc->sc_dev,
1160 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1161 goto fail;
1162 }
1163
1164 if (kvap != NULL)
1165 *kvap = dma->vaddr;
1166 return 0;
1167fail:
1168 iwn_dma_contig_free(dma);
1169 return error;
1170}
1171
1172static void
1173iwn_dma_contig_free(struct iwn_dma_info *dma)
1174{
1175 if (dma->tag != NULL) {
1176 if (dma->map != NULL) {
1177 if (dma->paddr == 0) {
1178 bus_dmamap_sync(dma->tag, dma->map,
1179 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1180 bus_dmamap_unload(dma->tag, dma->map);
1181 }
1182 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map);
1183 }
1184 bus_dma_tag_destroy(dma->tag);
1185 }
1186}
1187
1188static int
1189iwn_alloc_sched(struct iwn_softc *sc)
1190{
1191 /* TX scheduler rings must be aligned on a 1KB boundary. */
1192 return iwn_dma_contig_alloc(sc, &sc->sched_dma,
1193 (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
1194}
1195
1196static void
1197iwn_free_sched(struct iwn_softc *sc)
1198{
1199 iwn_dma_contig_free(&sc->sched_dma);
1200}
1201
1202static int
1203iwn_alloc_kw(struct iwn_softc *sc)
1204{
1205 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1206 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
1207 BUS_DMA_NOWAIT);
1208}
1209
1210static void
1211iwn_free_kw(struct iwn_softc *sc)
1212{
1213 iwn_dma_contig_free(&sc->kw_dma);
1214}
1215
1216static int
1217iwn_alloc_ict(struct iwn_softc *sc)
1218{
1219 /* ICT table must be aligned on a 4KB boundary. */
1220 return iwn_dma_contig_alloc(sc, &sc->ict_dma,
1221 (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
1222}
1223
1224static void
1225iwn_free_ict(struct iwn_softc *sc)
1226{
1227 iwn_dma_contig_free(&sc->ict_dma);
1228}
1229
1230static int
1231iwn_alloc_fwmem(struct iwn_softc *sc)
1232{
1233 /* Must be aligned on a 16-byte boundary. */
1234 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1235 sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
1236}
1237
1238static void
1239iwn_free_fwmem(struct iwn_softc *sc)
1240{
1241 iwn_dma_contig_free(&sc->fw_dma);
1242}
1243
1244static int
1245iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1246{
1247 bus_size_t size;
1248 int i, error;
1249
1250 ring->cur = 0;
1251
1252 /* Allocate RX descriptors (256-byte aligned). */
1253 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1254 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1255 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1256 if (error != 0) {
1257 device_printf(sc->sc_dev,
1258 "%s: could not allocate Rx ring DMA memory, error %d\n",
1259 __func__, error);
1260 goto fail;
1261 }
1262
1263 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1264 BUS_SPACE_MAXADDR_32BIT,
1265 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
1266 MCLBYTES, BUS_DMA_NOWAIT, &ring->data_dmat);
1267 if (error != 0) {
1268 device_printf(sc->sc_dev,
1269 "%s: bus_dma_tag_create_failed, error %d\n",
1270 __func__, error);
1271 goto fail;
1272 }
1273
1274 /* Allocate RX status area (16-byte aligned). */
1275 error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1276 (void **)&ring->stat, sizeof (struct iwn_rx_status),
1277 16, BUS_DMA_NOWAIT);
1278 if (error != 0) {
1279 device_printf(sc->sc_dev,
1280 "%s: could not allocate Rx status DMA memory, error %d\n",
1281 __func__, error);
1282 goto fail;
1283 }
1284
1285 /*
1286 * Allocate and map RX buffers.
1287 */
1288 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1289 struct iwn_rx_data *data = &ring->data[i];
1290 bus_addr_t paddr;
1291
1292 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1293 if (error != 0) {
1294 device_printf(sc->sc_dev,
1295 "%s: bus_dmamap_create failed, error %d\n",
1296 __func__, error);
1297 goto fail;
1298 }
1299
1300 data->m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1301 if (data->m == NULL) {
1302 device_printf(sc->sc_dev,
1303 "%s: could not allocate rx mbuf\n", __func__);
1304 error = ENOMEM;
1305 goto fail;
1306 }
1307
1308 /* Map page. */
1309 error = bus_dmamap_load(ring->data_dmat, data->map,
1310 mtod(data->m, caddr_t), MCLBYTES,
1311 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1312 if (error != 0 && error != EFBIG) {
1313 device_printf(sc->sc_dev,
1314 "%s: bus_dmamap_load failed, error %d\n",
1315 __func__, error);
1316 m_freem(data->m);
1317 error = ENOMEM; /* XXX unique code */
1318 goto fail;
1319 }
1320 bus_dmamap_sync(ring->data_dmat, data->map,
1321 BUS_DMASYNC_PREWRITE);
1322
1323 /* Set physical address of RX buffer (256-byte aligned). */
1324 ring->desc[i] = htole32(paddr >> 8);
1325 }
1326 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1327 BUS_DMASYNC_PREWRITE);
1328 return 0;
1329fail:
1330 iwn_free_rx_ring(sc, ring);
1331 return error;
1332}
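/*
 * Alignment note for the ring above: RX buffer addresses are written into
 * the descriptors right-shifted by 8 bits ("htole32(paddr >> 8)"), so the
 * buffers must be 256-byte aligned; the descriptor and status areas are
 * themselves allocated with 256-byte and 16-byte alignment respectively.
 */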
1333
1334static void
1335iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1336{
1337 int ntries;
1338
1339 if (iwn_nic_lock(sc) == 0) {
1340 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1341 for (ntries = 0; ntries < 1000; ntries++) {
1342 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1343 IWN_FH_RX_STATUS_IDLE)
1344 break;
1345 DELAY(10);
1346 }
1347 iwn_nic_unlock(sc);
1348#ifdef IWN_DEBUG
1349 if (ntries == 1000)
1350 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
1351 "timeout resetting Rx ring");
1352#endif
1353 }
1354 ring->cur = 0;
1355 sc->last_rx_valid = 0;
1356}
1357
1358static void
1359iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1360{
1361 int i;
1362
1363 iwn_dma_contig_free(&ring->desc_dma);
1364 iwn_dma_contig_free(&ring->stat_dma);
1365
1366 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1367 struct iwn_rx_data *data = &ring->data[i];
1368
1369 if (data->m != NULL) {
1370 bus_dmamap_sync(ring->data_dmat, data->map,
1371 BUS_DMASYNC_POSTREAD);
1372 bus_dmamap_unload(ring->data_dmat, data->map);
1373 m_freem(data->m);
1374 }
1375 if (data->map != NULL)
1376 bus_dmamap_destroy(ring->data_dmat, data->map);
1377 }
1378}
1379
1380static int
1381iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1382{
1383 bus_size_t size;
1384 bus_addr_t paddr;
1385 int i, error;
1386
1387 ring->qid = qid;
1388 ring->queued = 0;
1389 ring->cur = 0;
1390
1391 /* Allocate TX descriptors (256-byte aligned.) */
1392 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
1393 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1394 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1395 if (error != 0) {
1396 device_printf(sc->sc_dev,
1397 "%s: could not allocate TX ring DMA memory, error %d\n",
1398 __func__, error);
1399 goto fail;
1400 }
1401
1402 /*
1403 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1404 * to allocate command space for other rings.
1405 */
1406 if (qid > 4)
1407 return 0;
1408
1409 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
1410 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
1411 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
1412 if (error != 0) {
1413 device_printf(sc->sc_dev,
1414 "%s: could not allocate TX cmd DMA memory, error %d\n",
1415 __func__, error);
1416 goto fail;
1417 }
1418
1419 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1420 BUS_SPACE_MAXADDR_32BIT,
1421 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
1422 MCLBYTES, BUS_DMA_NOWAIT, &ring->data_dmat);
1423 if (error != 0) {
1424 device_printf(sc->sc_dev,
1425 "%s: bus_dma_tag_create_failed, error %d\n",
1426 __func__, error);
1427 goto fail;
1428 }
1429
1430 paddr = ring->cmd_dma.paddr;
1431 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1432 struct iwn_tx_data *data = &ring->data[i];
1433
1434 data->cmd_paddr = paddr;
1435 data->scratch_paddr = paddr + 12;
1436 paddr += sizeof (struct iwn_tx_cmd);
1437
1438 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1439 if (error != 0) {
1440 device_printf(sc->sc_dev,
1441 "%s: bus_dmamap_create failed, error %d\n",
1442 __func__, error);
1443 goto fail;
1444 }
1445 bus_dmamap_sync(ring->data_dmat, data->map,
1446 BUS_DMASYNC_PREWRITE);
1447 }
1448 return 0;
1449fail:
1450 iwn_free_tx_ring(sc, ring);
1451 return error;
1452}
1453
1454static void
1455iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1456{
1457 int i;
1458
1459 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1460 struct iwn_tx_data *data = &ring->data[i];
1461
1462 if (data->m != NULL) {
1463 bus_dmamap_unload(ring->data_dmat, data->map);
1464 m_freem(data->m);
1465 data->m = NULL;
1466 }
1467 }
1468 /* Clear TX descriptors. */
1469 memset(ring->desc, 0, ring->desc_dma.size);
1470 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1471 BUS_DMASYNC_PREWRITE);
1472 sc->qfullmsk &= ~(1 << ring->qid);
1473 ring->queued = 0;
1474 ring->cur = 0;
1475}
1476
1477static void
1478iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1479{
1480 int i;
1481
1482 iwn_dma_contig_free(&ring->desc_dma);
1483 iwn_dma_contig_free(&ring->cmd_dma);
1484
1485 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1486 struct iwn_tx_data *data = &ring->data[i];
1487
1488 if (data->m != NULL) {
1489 bus_dmamap_sync(ring->data_dmat, data->map,
1490 BUS_DMASYNC_POSTWRITE);
1491 bus_dmamap_unload(ring->data_dmat, data->map);
1492 m_freem(data->m);
1493 }
1494 if (data->map != NULL)
1495 bus_dmamap_destroy(ring->data_dmat, data->map);
1496 }
1497}
1498
1499static void
1500iwn5000_ict_reset(struct iwn_softc *sc)
1501{
1502 /* Disable interrupts. */
1503 IWN_WRITE(sc, IWN_INT_MASK, 0);
1504
1505 /* Reset ICT table. */
1506 memset(sc->ict, 0, IWN_ICT_SIZE);
1507 sc->ict_cur = 0;
1508
1509 /* Set physical address of ICT table (4KB aligned.) */
1510 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1511 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1512 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1513
1514 /* Enable periodic RX interrupt. */
1515 sc->int_mask |= IWN_INT_RX_PERIODIC;
1516 /* Switch to ICT interrupt mode in driver. */
1517 sc->sc_flags |= IWN_FLAG_USE_ICT;
1518
1519 /* Re-enable interrupts. */
1520 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1521 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1522}
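/*
 * Brief note on ICT mode (an assumption based on how the table is used
 * here): once IWN_FLAG_USE_ICT is set, interrupt causes are taken from
 * the 4KB table allocated in iwn_alloc_ict() instead of the INT register;
 * the table base is programmed right-shifted by 12 bits, which is why the
 * allocation requires 4KB alignment.
 */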
1523
1524static int
1525iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1526{
1527 const struct iwn_hal *hal = sc->sc_hal;
1528 int error;
1529 uint16_t val;
1530
1531 /* Check whether adapter has an EEPROM or an OTPROM. */
1532 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1533 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1534 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1535 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1536 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1537
1538 /* Adapter has to be powered on for EEPROM access to work. */
1539 error = iwn_apm_init(sc);
1540 if (error != 0) {
1541 device_printf(sc->sc_dev,
1542 "%s: could not power ON adapter, error %d\n",
1543 __func__, error);
1544 return error;
1545 }
1546
1547 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1548 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1549 return EIO;
1550 }
1551 error = iwn_eeprom_lock(sc);
1552 if (error != 0) {
1553 device_printf(sc->sc_dev,
1554 "%s: could not lock ROM, error %d\n",
1555 __func__, error);
1556 return error;
1557 }
1558
1559 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1560 error = iwn_init_otprom(sc);
1561 if (error != 0) {
1562 device_printf(sc->sc_dev,
1563 "%s: could not initialize OTPROM, error %d\n",
1564 __func__, error);
1565 return error;
1566 }
1567 }
1568
1569 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1570 sc->rfcfg = le16toh(val);
1571 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1572
1573 /* Read MAC address. */
1574 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1575
1576 /* Read adapter-specific information from EEPROM. */
1577 hal->read_eeprom(sc);
1578
1579 iwn_apm_stop(sc); /* Power OFF adapter. */
1580
1581 iwn_eeprom_unlock(sc);
1582 return 0;
1583}
1584
1585static void
1586iwn4965_read_eeprom(struct iwn_softc *sc)
1587{
1588 uint32_t addr;
1589 int i;
1590 uint16_t val;
1591
1592 /* Read regulatory domain (4 ASCII characters.) */
1593 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1594
1595 /* Read the list of authorized channels (20MHz ones only.) */
1596 for (i = 0; i < 5; i++) {
1597 addr = iwn4965_regulatory_bands[i];
1598 iwn_read_eeprom_channels(sc, i, addr);
1599 }
1600
1601 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1602 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1603 sc->maxpwr2GHz = val & 0xff;
1604 sc->maxpwr5GHz = val >> 8;
1605 /* Check that EEPROM values are within valid range. */
1606 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1607 sc->maxpwr5GHz = 38;
1608 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1609 sc->maxpwr2GHz = 38;
1610 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1611 sc->maxpwr2GHz, sc->maxpwr5GHz);
1612
1613 /* Read samples for each TX power group. */
1614 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1615 sizeof sc->bands);
1616
1617 /* Read voltage at which samples were taken. */
1618 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1619 sc->eeprom_voltage = (int16_t)le16toh(val);
1620 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1621 sc->eeprom_voltage);
1622
1623#ifdef IWN_DEBUG
1624 /* Print samples. */
1625 if (sc->sc_debug & IWN_DEBUG_ANY) {
1626 for (i = 0; i < IWN_NBANDS; i++)
1627 iwn4965_print_power_group(sc, i);
1628 }
1629#endif
1630}
1631
1632#ifdef IWN_DEBUG
1633static void
1634iwn4965_print_power_group(struct iwn_softc *sc, int i)
1635{
1636 struct iwn4965_eeprom_band *band = &sc->bands[i];
1637 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1638 int j, c;
1639
1640 kprintf("===band %d===\n", i);
1641 kprintf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1642 kprintf("chan1 num=%d\n", chans[0].num);
1643 for (c = 0; c < 2; c++) {
1644 for (j = 0; j < IWN_NSAMPLES; j++) {
1645 kprintf("chain %d, sample %d: temp=%d gain=%d "
1646 "power=%d pa_det=%d\n", c, j,
1647 chans[0].samples[c][j].temp,
1648 chans[0].samples[c][j].gain,
1649 chans[0].samples[c][j].power,
1650 chans[0].samples[c][j].pa_det);
1651 }
1652 }
1653 kprintf("chan2 num=%d\n", chans[1].num);
1654 for (c = 0; c < 2; c++) {
1655 for (j = 0; j < IWN_NSAMPLES; j++) {
1656 kprintf("chain %d, sample %d: temp=%d gain=%d "
1657 "power=%d pa_det=%d\n", c, j,
1658 chans[1].samples[c][j].temp,
1659 chans[1].samples[c][j].gain,
1660 chans[1].samples[c][j].power,
1661 chans[1].samples[c][j].pa_det);
1662 }
1663 }
1664}
1665#endif
1666
1667static void
1668iwn5000_read_eeprom(struct iwn_softc *sc)
1669{
1670 struct iwn5000_eeprom_calib_hdr hdr;
1671 int32_t temp, volt;
1672 uint32_t addr, base;
1673 int i;
1674 uint16_t val;
1675
1676 /* Read regulatory domain (4 ASCII characters.) */
1677 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1678 base = le16toh(val);
1679 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1680 sc->eeprom_domain, 4);
1681
1682 /* Read the list of authorized channels (20MHz ones only.) */
1683 for (i = 0; i < 5; i++) {
1684 addr = base + iwn5000_regulatory_bands[i];
1685 iwn_read_eeprom_channels(sc, i, addr);
1686 }
1687
1688 /* Read enhanced TX power information for 6000 Series. */
1689 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1690 iwn_read_eeprom_enhinfo(sc);
1691
1692 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1693 base = le16toh(val);
1694 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1695 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1696 "%s: calib version=%u pa type=%u voltage=%u\n",
1697 __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
1698 sc->calib_ver = hdr.version;
1699
1700 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1701 /* Compute temperature offset. */
1702 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1703 temp = le16toh(val);
1704 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1705 volt = le16toh(val);
1706 sc->temp_off = temp - (volt / -5);
1707 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1708 temp, volt, sc->temp_off);
1709 } else {
1710 /* Read crystal calibration. */
1711 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1712 &sc->eeprom_crystal, sizeof (uint32_t));
1713 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1714 le32toh(sc->eeprom_crystal));
1715 }
1716}
1717
1718/*
1719 * Translate EEPROM flags to net80211.
1720 */
1721static uint32_t
1722iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1723{
1724 uint32_t nflags;
1725
1726 nflags = 0;
1727 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1728 nflags |= IEEE80211_CHAN_PASSIVE;
1729 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1730 nflags |= IEEE80211_CHAN_NOADHOC;
1731 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1732 nflags |= IEEE80211_CHAN_DFS;
1733 /* XXX apparently IBSS may still be marked */
1734 nflags |= IEEE80211_CHAN_NOADHOC;
1735 }
1736
1737 return nflags;
1738}
1739
1740static void
1741iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1742{
1743 struct ifnet *ifp = sc->sc_ifp;
1744 struct ieee80211com *ic = ifp->if_l2com;
1745 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1746 const struct iwn_chan_band *band = &iwn_bands[n];
1747 struct ieee80211_channel *c;
1748 int i, chan, nflags;
1749
1750 for (i = 0; i < band->nchan; i++) {
1751 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1752 DPRINTF(sc, IWN_DEBUG_RESET,
1753 "skip chan %d flags 0x%x maxpwr %d\n",
1754 band->chan[i], channels[i].flags,
1755 channels[i].maxpwr);
1756 continue;
1757 }
1758 chan = band->chan[i];
1759 nflags = iwn_eeprom_channel_flags(&channels[i]);
1760
1761 DPRINTF(sc, IWN_DEBUG_RESET,
1762 "add chan %d flags 0x%x maxpwr %d\n",
1763 chan, channels[i].flags, channels[i].maxpwr);
1764
1765 c = &ic->ic_channels[ic->ic_nchans++];
1766 c->ic_ieee = chan;
1767 c->ic_maxregpower = channels[i].maxpwr;
1768 c->ic_maxpower = 2*c->ic_maxregpower;
1769
1770 /* Save maximum allowed TX power for this channel. */
1771 sc->maxpwr[chan] = channels[i].maxpwr;
1772
1773 if (n == 0) { /* 2GHz band */
1774 c->ic_freq = ieee80211_ieee2mhz(chan,
1775 IEEE80211_CHAN_G);
1776
1777 /* G =>'s B is supported */
1778 c->ic_flags = IEEE80211_CHAN_B | nflags;
1779
1780 c = &ic->ic_channels[ic->ic_nchans++];
1781 c[0] = c[-1];
1782 c->ic_flags = IEEE80211_CHAN_G | nflags;
1783 } else { /* 5GHz band */
1784 c->ic_freq = ieee80211_ieee2mhz(chan,
1785 IEEE80211_CHAN_A);
1786 c->ic_flags = IEEE80211_CHAN_A | nflags;
1787 sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1788 }
1789#if 0 /* HT */
1790 /* XXX no constraints on using HT20 */
1791 /* add HT20, HT40 added separately */
1792 c = &ic->ic_channels[ic->ic_nchans++];
1793 c[0] = c[-1];
1794 c->ic_flags |= IEEE80211_CHAN_HT20;
1795 /* XXX NARROW =>'s 1/2 and 1/4 width? */
1796#endif
1797 }
1798}
1799
1800#if 0 /* HT */
1801static void
1802iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1803{
1804 struct ifnet *ifp = sc->sc_ifp;
1805 struct ieee80211com *ic = ifp->if_l2com;
1806 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1807 const struct iwn_chan_band *band = &iwn_bands[n];
1808 struct ieee80211_channel *c, *cent, *extc;
1809 int i;
1810
1811 for (i = 0; i < band->nchan; i++) {
1812 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1813 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1814 DPRINTF(sc, IWN_DEBUG_RESET,
1815 "skip chan %d flags 0x%x maxpwr %d\n",
1816 band->chan[i], channels[i].flags,
1817 channels[i].maxpwr);
1818 continue;
1819 }
1820 /*
1821 * Each entry defines an HT40 channel pair; find the
1822 * center channel, then the extension channel above.
1823 */
1824 cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1825 band->flags & ~IEEE80211_CHAN_HT);
1826 if (cent == NULL) { /* XXX shouldn't happen */
1827 device_printf(sc->sc_dev,
1828 "%s: no entry for channel %d\n",
1829 __func__, band->chan[i]);
1830 continue;
1831 }
1832 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1833 band->flags & ~IEEE80211_CHAN_HT);
1834 if (extc == NULL) {
1835 DPRINTF(sc, IWN_DEBUG_RESET,
1836 "skip chan %d, extension channel not found\n",
1837 band->chan[i]);
1838 continue;
1839 }
1840
1841 DPRINTF(sc, IWN_DEBUG_RESET,
1842 "add ht40 chan %d flags 0x%x maxpwr %d\n",
1843 band->chan[i], channels[i].flags, channels[i].maxpwr);
1844
1845 c = &ic->ic_channels[ic->ic_nchans++];
1846 c[0] = cent[0];
1847 c->ic_extieee = extc->ic_ieee;
1848 c->ic_flags &= ~IEEE80211_CHAN_HT;
1849 c->ic_flags |= IEEE80211_CHAN_HT40U;
1850 c = &ic->ic_channels[ic->ic_nchans++];
1851 c[0] = extc[0];
1852 c->ic_extieee = cent->ic_ieee;
1853 c->ic_flags &= ~IEEE80211_CHAN_HT;
1854 c->ic_flags |= IEEE80211_CHAN_HT40D;
1855 }
1856}
1857#endif
1858
1859static void
1860iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1861{
1862 struct ifnet *ifp = sc->sc_ifp;
1863 struct ieee80211com *ic = ifp->if_l2com;
1864
1865 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1866 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1867
1868 if (n < 5)
1869 iwn_read_eeprom_band(sc, n);
1870#if 0 /* HT */
1871 else
1872 iwn_read_eeprom_ht40(sc, n);
1873#endif
1874 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1875}
1876
1877#define nitems(_a) (sizeof((_a)) / sizeof((_a)[0]))
1878
1879static void
1880iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1881{
1882 struct iwn_eeprom_enhinfo enhinfo[35];
1883 uint16_t val, base;
1884 int8_t maxpwr;
1885 int i;
1886
1887 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1888 base = le16toh(val);
1889 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1890 enhinfo, sizeof enhinfo);
1891
1892 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1893 for (i = 0; i < nitems(enhinfo); i++) {
1894 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1895 continue; /* Skip invalid entries. */
1896
1897 maxpwr = 0;
1898 if (sc->txchainmask & IWN_ANT_A)
1899 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1900 if (sc->txchainmask & IWN_ANT_B)
1901 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1902 if (sc->txchainmask & IWN_ANT_C)
1903 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1904 if (sc->ntxchains == 2)
1905 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1906 else if (sc->ntxchains == 3)
1907 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1908 maxpwr /= 2; /* Convert half-dBm to dBm. */
1909
1910 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
1911 maxpwr);
1912 sc->enh_maxpwr[i] = maxpwr;
1913 }
1914}
1915
1916static struct ieee80211_node *
1917iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1918{
3db796ac 1919	return kmalloc(sizeof (struct iwn_node), M_80211_NODE, M_INTWAIT | M_ZERO);
1920}
1921
1922static void
1923iwn_newassoc(struct ieee80211_node *ni, int isnew)
1924{
1925 /* XXX move */
1926 //if (!isnew) {
1927 ieee80211_ratectl_node_deinit(ni);
1928 //}
1929
1930 ieee80211_ratectl_node_init(ni);
1931}
1932
1933static int
1934iwn_media_change(struct ifnet *ifp)
1935{
1936 int error = ieee80211_media_change(ifp);
1937 /* NB: only the fixed rate can change and that doesn't need a reset */
1938 return (error == ENETRESET ? 0 : error);
1939}
1940
1941static int
1942iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1943{
1944 struct iwn_vap *ivp = IWN_VAP(vap);
1945 struct ieee80211com *ic = vap->iv_ic;
1946 struct iwn_softc *sc = ic->ic_ifp->if_softc;
1947 int error;
1948
1949 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1950 ieee80211_state_name[vap->iv_state],
1951 ieee80211_state_name[nstate]);
1952
1953 IEEE80211_UNLOCK(ic);
1954 IWN_LOCK(sc);
1955 callout_stop(&sc->sc_timer_to);
1956
1957 if (nstate == IEEE80211_S_AUTH && vap->iv_state != IEEE80211_S_AUTH) {
1958 /* !AUTH -> AUTH requires adapter config */
1959 /* Reset state to handle reassociations correctly. */
1960 sc->rxon.associd = 0;
1961 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1962 iwn_calib_reset(sc);
1963 error = iwn_auth(sc, vap);
1964 }
1965 if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) {
1966 /*
1967 * !RUN -> RUN requires setting the association id
1968 * which is done with a firmware cmd. We also defer
1969 * starting the timers until that work is done.
1970 */
1971 error = iwn_run(sc, vap);
1972 }
1973 if (nstate == IEEE80211_S_RUN) {
1974 /*
1975 * RUN -> RUN transition; just restart the timers.
1976 */
1977 iwn_calib_reset(sc);
1978 }
1979 IWN_UNLOCK(sc);
1980 IEEE80211_LOCK(ic);
1981 return ivp->iv_newstate(vap, nstate, arg);
1982}
1983
1984/*
1985 * Process an RX_PHY firmware notification. This is usually immediately
1986 * followed by an MPDU_RX_DONE notification.
1987 */
1988static void
1989iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1990 struct iwn_rx_data *data)
1991{
1992 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1993
1994 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
1995 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
1996
1997 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
1998 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1999 sc->last_rx_valid = 1;
2000}
2001
2002static void
2003iwn_timer_timeout(void *arg)
2004{
2005 struct iwn_softc *sc = arg;
2006 uint32_t flags = 0;
2007
b008e137 2008 IWN_LOCK(sc);
2009
2010 if (sc->calib_cnt && --sc->calib_cnt == 0) {
2011 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2012 "send statistics request");
2013 (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2014 sizeof flags, 1);
2015 sc->calib_cnt = 60; /* do calibration every 60s */
2016 }
2017 iwn_watchdog(sc); /* NB: piggyback tx watchdog */
2018 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
b008e137 2019 IWN_UNLOCK(sc);
2020}
2021
2022static void
2023iwn_calib_reset(struct iwn_softc *sc)
2024{
2025 callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc);
2026 sc->calib_cnt = 60; /* do calibration every 60s */
2027}
2028
2029/*
2030 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2031 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2032 */
2033static void
2034iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2035 struct iwn_rx_data *data)
2036{
2037 const struct iwn_hal *hal = sc->sc_hal;
2038 struct ifnet *ifp = sc->sc_ifp;
2039 struct ieee80211com *ic = ifp->if_l2com;
2040 struct iwn_rx_ring *ring = &sc->rxq;
2041 struct ieee80211_frame *wh;
2042 struct ieee80211_node *ni;
2043 struct mbuf *m, *m1;
2044 struct iwn_rx_stat *stat;
2045 caddr_t head;
2046 bus_addr_t paddr;
2047 uint32_t flags;
2048 int error, len, rssi, nf;
2049
2050 if (desc->type == IWN_MPDU_RX_DONE) {
2051 /* Check for prior RX_PHY notification. */
2052 if (!sc->last_rx_valid) {
2053 DPRINTF(sc, IWN_DEBUG_ANY,
2054 "%s: missing RX_PHY\n", __func__);
2055 ifp->if_ierrors++;
2056 return;
2057 }
2058 sc->last_rx_valid = 0;
2059 stat = &sc->last_rx_stat;
2060 } else
2061 stat = (struct iwn_rx_stat *)(desc + 1);
2062
2063 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2064
2065 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2066 device_printf(sc->sc_dev,
2067 "%s: invalid rx statistic header, len %d\n",
2068 __func__, stat->cfg_phy_len);
2069 ifp->if_ierrors++;
2070 return;
2071 }
2072 if (desc->type == IWN_MPDU_RX_DONE) {
2073 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2074 head = (caddr_t)(mpdu + 1);
2075 len = le16toh(mpdu->len);
2076 } else {
2077 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2078 len = le16toh(stat->len);
2079 }
2080
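	/*
	 * NB: the firmware appends a 32-bit RX status word right after the
	 * frame payload; it carries the FCS/decryption status tested below.
	 */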
2081 flags = le32toh(*(uint32_t *)(head + len));
2082
2083 /* Discard frames with a bad FCS early. */
2084 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2085 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n",
2086 __func__, flags);
2087 ifp->if_ierrors++;
2088 return;
2089 }
2090 /* Discard frames that are too short. */
2091 if (len < sizeof (*wh)) {
2092 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2093 __func__, len);
2094 ifp->if_ierrors++;
2095 return;
2096 }
2097
2098 /* XXX don't need mbuf, just dma buffer */
3db796ac 2099 m1 = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
2100 if (m1 == NULL) {
2101 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2102 __func__);
2103 ifp->if_ierrors++;
2104 return;
2105 }
2106 bus_dmamap_unload(ring->data_dmat, data->map);
2107
2108 error = bus_dmamap_load(ring->data_dmat, data->map,
3db796ac 2109 mtod(m1, caddr_t), MCLBYTES,
2110 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2111 if (error != 0 && error != EFBIG) {
2112 device_printf(sc->sc_dev,
2113 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2114 m_freem(m1);
2115 ifp->if_ierrors++;
2116 return;
2117 }
2118
2119 m = data->m;
2120 data->m = m1;
2121 /* Update RX descriptor. */
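	/*
	 * NB: the hardware takes RX buffer addresses in 256-byte units,
	 * hence the paddr >> 8 (cluster buffers are suitably aligned).
	 */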
2122 ring->desc[ring->cur] = htole32(paddr >> 8);
2123 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2124 BUS_DMASYNC_PREWRITE);
2125
2126 /* Finalize mbuf. */
2127 m->m_pkthdr.rcvif = ifp;
2128 m->m_data = head;
2129 m->m_pkthdr.len = m->m_len = len;
2130
2131 rssi = hal->get_rssi(sc, stat);
2132
2133 /* Grab a reference to the source node. */
2134 wh = mtod(m, struct ieee80211_frame *);
2135 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2136 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2137 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2138
2139 if (ieee80211_radiotap_active(ic)) {
2140 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2141
2142 tap->wr_tsft = htole64(stat->tstamp);
2143 tap->wr_flags = 0;
2144 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2145 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
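		/*
		 * Map the firmware PLCP signal value to a radiotap rate in
		 * 500 kb/s units: 10/20/55/110 are the CCK codes, the
		 * remaining values are OFDM PLCP signal codes.
		 */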
2146 switch (stat->rate) {
2147 /* CCK rates. */
2148 case 10: tap->wr_rate = 2; break;
2149 case 20: tap->wr_rate = 4; break;
2150 case 55: tap->wr_rate = 11; break;
2151 case 110: tap->wr_rate = 22; break;
2152 /* OFDM rates. */
2153 case 0xd: tap->wr_rate = 12; break;
2154 case 0xf: tap->wr_rate = 18; break;
2155 case 0x5: tap->wr_rate = 24; break;
2156 case 0x7: tap->wr_rate = 36; break;
2157 case 0x9: tap->wr_rate = 48; break;
2158 case 0xb: tap->wr_rate = 72; break;
2159 case 0x1: tap->wr_rate = 96; break;
2160 case 0x3: tap->wr_rate = 108; break;
2161 /* Unknown rate: should not happen. */
2162 default: tap->wr_rate = 0;
2163 }
2164 tap->wr_dbm_antsignal = rssi;
2165 tap->wr_dbm_antnoise = nf;
2166 }
2167
2168 IWN_UNLOCK(sc);
2169
2170 /* Send the frame to the 802.11 layer. */
2171 if (ni != NULL) {
2172 (void) ieee80211_input(ni, m, rssi - nf, nf);
2173 /* Node is no longer needed. */
2174 ieee80211_free_node(ni);
2175 } else
2176 (void) ieee80211_input_all(ic, m, rssi - nf, nf);
2177
2178 IWN_LOCK(sc);
2179}
2180
2181#if 0 /* HT */
2182/* Process an incoming Compressed BlockAck. */
2183static void
2184iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2185 struct iwn_rx_data *data)
2186{
2187 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2188 struct iwn_tx_ring *txq;
2189
2190 txq = &sc->txq[letoh16(ba->qid)];
2191 /* XXX TBD */
2192}
2193#endif
2194
2195/*
2196 * Process a CALIBRATION_RESULT notification sent by the initialization
2197 * firmware in response to a CMD_CALIB_CONFIG command (5000 Series only).
2198 */
2199static void
2200iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2201 struct iwn_rx_data *data)
2202{
2203 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2204 int len, idx = -1;
2205
2206 /* Runtime firmware should not send such a notification. */
2207 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2208 return;
2209
2210 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2211 len = (le32toh(desc->len) & 0x3fff) - 4;
2212
2213 switch (calib->code) {
2214 case IWN5000_PHY_CALIB_DC:
2215 if (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2216 sc->hw_type == IWN_HW_REV_TYPE_6050)
2217 idx = 0;
2218 break;
2219 case IWN5000_PHY_CALIB_LO:
2220 idx = 1;
2221 break;
2222 case IWN5000_PHY_CALIB_TX_IQ:
2223 idx = 2;
2224 break;
2225 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2226 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2227 sc->hw_type != IWN_HW_REV_TYPE_5150)
2228 idx = 3;
2229 break;
2230 case IWN5000_PHY_CALIB_BASE_BAND:
2231 idx = 4;
2232 break;
2233 }
2234 if (idx == -1) /* Ignore other results. */
2235 return;
2236
2237 /* Save calibration result. */
2238 if (sc->calibcmd[idx].buf != NULL)
2239 kfree(sc->calibcmd[idx].buf, M_DEVBUF);
2240 sc->calibcmd[idx].buf = kmalloc(len, M_DEVBUF, M_INTWAIT);
2241 if (sc->calibcmd[idx].buf == NULL) {
2242 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2243 "not enough memory for calibration result %d\n",
2244 calib->code);
2245 return;
2246 }
2247 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2248 "saving calibration result code=%d len=%d\n", calib->code, len);
2249 sc->calibcmd[idx].len = len;
2250 memcpy(sc->calibcmd[idx].buf, calib, len);
2251}
2252
2253/*
2254 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2255 * The latter is sent by the firmware after each received beacon.
2256 */
2257static void
2258iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2259 struct iwn_rx_data *data)
2260{
2261 const struct iwn_hal *hal = sc->sc_hal;
2262 struct ifnet *ifp = sc->sc_ifp;
2263 struct ieee80211com *ic = ifp->if_l2com;
2264 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2265 struct iwn_calib_state *calib = &sc->calib;
2266 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2267 int temp;
2268
2269 /* Beacon stats are meaningful only when associated and not scanning. */
2270 if (vap->iv_state != IEEE80211_S_RUN ||
2271 (ic->ic_flags & IEEE80211_F_SCAN))
2272 return;
2273
2274 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2275 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type);
2276 iwn_calib_reset(sc); /* Reset TX power calibration timeout. */
2277
2278 /* Test if temperature has changed. */
2279 if (stats->general.temp != sc->rawtemp) {
2280 /* Convert "raw" temperature to degC. */
2281 sc->rawtemp = stats->general.temp;
2282 temp = hal->get_temperature(sc);
2283 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2284 __func__, temp);
2285
2286 /* Update TX power if need be (4965AGN only.) */
2287 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2288 iwn4965_power_calibration(sc, temp);
2289 }
2290
2291 if (desc->type != IWN_BEACON_STATISTICS)
2292 return; /* Reply to a statistics request. */
2293
2294 sc->noise = iwn_get_noise(&stats->rx.general);
2295 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2296
2297 /* Test that RSSI and noise are present in stats report. */
2298 if (le32toh(stats->rx.general.flags) != 1) {
2299 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2300 "received statistics without RSSI");
2301 return;
2302 }
2303
2304 if (calib->state == IWN_CALIB_STATE_ASSOC)
2305 iwn_collect_noise(sc, &stats->rx.general);
2306 else if (calib->state == IWN_CALIB_STATE_RUN)
2307 iwn_tune_sensitivity(sc, &stats->rx);
2308}
2309
2310/*
2311 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2312 * and 5000 adapters use different, incompatible TX status formats.
2313 */
2314static void
2315iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2316 struct iwn_rx_data *data)
2317{
2318 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2319 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2320
2321 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2322 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2323 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2324 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2325 le32toh(stat->status));
2326
2327 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2328 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2329}
2330
2331static void
2332iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2333 struct iwn_rx_data *data)
2334{
2335 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2336 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2337
2338 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2339 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2340 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2341 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2342 le32toh(stat->status));
2343
2344#ifdef notyet
2345 /* Reset TX scheduler slot. */
2346 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2347#endif
2348
2349 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2350 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2351}
2352
2353/*
2354 * Adapter-independent backend for TX_DONE firmware notifications.
2355 */
2356static void
2357iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2358 uint8_t status)
2359{
2360 struct ifnet *ifp = sc->sc_ifp;
2361 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2362 struct iwn_tx_data *data = &ring->data[desc->idx];
2363 struct mbuf *m;
2364 struct ieee80211_node *ni;
2365 struct ieee80211vap *vap;
2366
2367 KASSERT(data->ni != NULL, ("no node"));
2368
2369 /* Unmap and free mbuf. */
2370 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2371 bus_dmamap_unload(ring->data_dmat, data->map);
2372 m = data->m, data->m = NULL;
2373 ni = data->ni, data->ni = NULL;
2374 vap = ni->ni_vap;
2375
2376 if (m->m_flags & M_TXCB) {
2377 /*
2378 * Channels marked for "radar" require traffic to be received
2379 * to unlock before we can transmit. Until traffic is seen
2380 * any attempt to transmit is returned immediately with status
2381 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
2382 * happen on first authenticate after scanning. To work around
2383 * this we ignore a failure of this sort in AUTH state so the
2384 * 802.11 layer will fall back to using a timeout to wait for
2385 * the AUTH reply. This allows the firmware time to see
2386 * traffic so a subsequent retry of AUTH succeeds. It's
2387 * unclear why the firmware does not maintain state for
2388 * channels recently visited as this would allow immediate
2389 * use of the channel after a scan (where we see traffic).
2390 */
2391 if (status == IWN_TX_FAIL_TX_LOCKED &&
2392 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2393 ieee80211_process_callback(ni, m, 0);
2394 else
2395 ieee80211_process_callback(ni, m,
2396 (status & IWN_TX_FAIL) != 0);
2397 }
2398
2399 /*
2400 * Update rate control statistics for the node.
2401 */
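	/*
	 * NB: the high bit of the status byte is set when the frame was
	 * not acknowledged (it appears to mirror the IWN_TX_FAIL test
	 * above); count it as an output error and a rate control failure.
	 */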
2402 if (status & 0x80) {
2403 ifp->if_oerrors++;
2404 ieee80211_ratectl_tx_complete(vap, ni,
2405 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2406 } else {
2407 ieee80211_ratectl_tx_complete(vap, ni,
2408 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2409 }
2410 m_freem(m);
2411 ieee80211_free_node(ni);
2412
2413 sc->sc_tx_timer = 0;
2414 if (--ring->queued < IWN_TX_RING_LOMARK) {
2415 sc->qfullmsk &= ~(1 << ring->qid);
2416 if (sc->qfullmsk == 0 &&
2417 (ifp->if_flags & IFF_OACTIVE)) {
2418 ifp->if_flags &= ~IFF_OACTIVE;
2419 iwn_start_locked(ifp);
2420 }
2421 }
2422}
2423
2424/*
2425 * Process a "command done" firmware notification. This is where we wake up
2426 * processes waiting for a synchronous command completion.
2427 */
2428static void
2429iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2430{
2431 struct iwn_tx_ring *ring = &sc->txq[4];
2432 struct iwn_tx_data *data;
2433
2434 if ((desc->qid & 0xf) != 4)
2435 return; /* Not a command ack. */
2436
2437 data = &ring->data[desc->idx];
2438
2439 /* If the command was mapped in an mbuf, free it. */
2440 if (data->m != NULL) {
2441 bus_dmamap_unload(ring->data_dmat, data->map);
2442 m_freem(data->m);
2443 data->m = NULL;
2444 }
2445 wakeup(&ring->desc[desc->idx]);
2446}
2447
2448/*
2449 * Process an INT_FH_RX or INT_SW_RX interrupt.
2450 */
2451static void
2452iwn_notif_intr(struct iwn_softc *sc)
2453{
2454 struct ifnet *ifp = sc->sc_ifp;
2455 struct ieee80211com *ic = ifp->if_l2com;
2456 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2457 uint16_t hw;
2458
2459 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2460 BUS_DMASYNC_POSTREAD);
2461
2462 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2463 while (sc->rxq.cur != hw) {
2464 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2465 struct iwn_rx_desc *desc;
2466
2467 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2468 BUS_DMASYNC_POSTREAD);
2469 desc = mtod(data->m, struct iwn_rx_desc *);
2470
2471 DPRINTF(sc, IWN_DEBUG_RECV,
2472 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2473 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2474 desc->type, iwn_intr_str(desc->type),
2475 le16toh(desc->len));
2476
2477 if (!(desc->qid & 0x80)) /* Reply to a command. */
2478 iwn_cmd_done(sc, desc);
2479
2480 switch (desc->type) {
2481 case IWN_RX_PHY:
2482 iwn_rx_phy(sc, desc, data);
2483 break;
2484
2485 case IWN_RX_DONE: /* 4965AGN only. */
2486 case IWN_MPDU_RX_DONE:
2487 /* An 802.11 frame has been received. */
2488 iwn_rx_done(sc, desc, data);
2489 break;
2490
2491#if 0 /* HT */
2492 case IWN_RX_COMPRESSED_BA:
2493 /* A Compressed BlockAck has been received. */
2494 iwn_rx_compressed_ba(sc, desc, data);
2495 break;
2496#endif
2497
2498 case IWN_TX_DONE:
2499 /* An 802.11 frame has been transmitted. */
2500 sc->sc_hal->tx_done(sc, desc, data);
2501 break;
2502
2503 case IWN_RX_STATISTICS:
2504 case IWN_BEACON_STATISTICS:
2505 iwn_rx_statistics(sc, desc, data);
2506 break;
2507
2508 case IWN_BEACON_MISSED:
2509 {
2510 struct iwn_beacon_missed *miss =
2511 (struct iwn_beacon_missed *)(desc + 1);
2512 int misses;
2513
2514 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2515 BUS_DMASYNC_POSTREAD);
2516 misses = le32toh(miss->consecutive);
2517
2518 /* XXX not sure why we're notified w/ zero */
2519 if (misses == 0)
2520 break;
2521 DPRINTF(sc, IWN_DEBUG_STATE,
2522 "%s: beacons missed %d/%d\n", __func__,
2523 misses, le32toh(miss->total));
2524
2525 /*
2526 * If more than 5 consecutive beacons are missed,
2527 * reinitialize the sensitivity state machine.
2528 */
2529 if (vap->iv_state == IEEE80211_S_RUN && misses > 5)
2530 (void) iwn_init_sensitivity(sc);
2531 if (misses >= vap->iv_bmissthreshold) {
2532 IWN_UNLOCK(sc);
2533 ieee80211_beacon_miss(ic);
2534 IWN_LOCK(sc);
2535 }
2536 break;
2537 }
2538 case IWN_UC_READY:
2539 {
2540 struct iwn_ucode_info *uc =
2541 (struct iwn_ucode_info *)(desc + 1);
2542
2543 /* The microcontroller is ready. */
2544 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2545 BUS_DMASYNC_POSTREAD);
2546 DPRINTF(sc, IWN_DEBUG_RESET,
2547 "microcode alive notification version=%d.%d "
2548 "subtype=%x alive=%x\n", uc->major, uc->minor,
2549 uc->subtype, le32toh(uc->valid));
2550
2551 if (le32toh(uc->valid) != 1) {
2552 device_printf(sc->sc_dev,
2553 "microcontroller initialization failed");
2554 break;
2555 }
2556 if (uc->subtype == IWN_UCODE_INIT) {
2557 /* Save microcontroller report. */
2558 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2559 }
2560 /* Save the address of the error log in SRAM. */
2561 sc->errptr = le32toh(uc->errptr);
2562 break;
2563 }
2564 case IWN_STATE_CHANGED:
2565 {
2566 uint32_t *status = (uint32_t *)(desc + 1);
2567
2568 /*
2569 * State change allows hardware switch change to be
2570 * noted. However, we handle this in iwn_intr as we
2571 * get both the enable/disable intr.
2572 */
2573 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2574 BUS_DMASYNC_POSTREAD);
2575 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2576 le32toh(*status));
2577 break;
2578 }
2579 case IWN_START_SCAN:
2580 {
2581 struct iwn_start_scan *scan =
2582 (struct iwn_start_scan *)(desc + 1);
2583
2584 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2585 BUS_DMASYNC_POSTREAD);
2586 DPRINTF(sc, IWN_DEBUG_ANY,
2587 "%s: scanning channel %d status %x\n",
2588 __func__, scan->chan, le32toh(scan->status));
2589 break;
2590 }
2591 case IWN_STOP_SCAN:
2592 {
2593 struct iwn_stop_scan *scan =
2594 (struct iwn_stop_scan *)(desc + 1);
2595
2596 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2597 BUS_DMASYNC_POSTREAD);
2598 DPRINTF(sc, IWN_DEBUG_STATE,
2599 "scan finished nchan=%d status=%d chan=%d\n",
2600 scan->nchan, scan->status, scan->chan);
2601
2602 IWN_UNLOCK(sc);
2603 ieee80211_scan_next(vap);
2604 IWN_LOCK(sc);
2605 break;
2606 }
2607 case IWN5000_CALIBRATION_RESULT:
2608 iwn5000_rx_calib_results(sc, desc, data);
2609 break;
2610
2611 case IWN5000_CALIBRATION_DONE:
2612 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2613 wakeup(sc);
2614 break;
2615 }
2616
2617 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2618 }
2619
2620 /* Tell the firmware what we have processed. */
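	/*
	 * Step back to the last entry we processed and round down to a
	 * multiple of 8; the RX write pointer is always written 8-aligned
	 * (hence the & ~7).
	 */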
2621 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2622 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2623}
2624
2625/*
2626 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2627 * from power-down sleep mode.
2628 */
2629static void
2630iwn_wakeup_intr(struct iwn_softc *sc)
2631{
2632 int qid;
2633
2634 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2635 __func__);
2636
2637 /* Wakeup RX and TX rings. */
2638 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2639 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) {
2640 struct iwn_tx_ring *ring = &sc->txq[qid];
2641 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2642 }
2643}
2644
2645static void
2646iwn_rftoggle_intr(struct iwn_softc *sc)
2647{
2648 struct ifnet *ifp = sc->sc_ifp;
2649 struct ieee80211com *ic = ifp->if_l2com;
2650 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2651
2652 IWN_LOCK_ASSERT(sc);
2653
2654 device_printf(sc->sc_dev, "RF switch: radio %s\n",
2655 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2656 if (tmp & IWN_GP_CNTRL_RFKILL)
2657 ieee80211_runtask(ic, &sc->sc_radioon_task);
2658 else
2659 ieee80211_runtask(ic, &sc->sc_radiooff_task);
2660}
2661
2662/*
2663 * Dump the error log of the firmware when a firmware panic occurs. Although
2664 * we can't debug the firmware because it is neither open source nor free, the
2665 * log can help us identify certain classes of problems.
2666 */
2667static void
2668iwn_fatal_intr(struct iwn_softc *sc)
2669{
2670 const struct iwn_hal *hal = sc->sc_hal;
2671 struct iwn_fw_dump dump;
2672 int i;
2673
2674 IWN_LOCK_ASSERT(sc);
2675
2676 /* Force a complete recalibration on next init. */
2677 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2678
2679 /* Check that the error log address is valid. */
2680 if (sc->errptr < IWN_FW_DATA_BASE ||
2681 sc->errptr + sizeof (dump) >
2682 IWN_FW_DATA_BASE + hal->fw_data_maxsz) {
3db796ac 2683 kprintf("%s: bad firmware error log address 0x%08x\n",
2684 __func__, sc->errptr);
2685 return;
2686 }
2687 if (iwn_nic_lock(sc) != 0) {
3db796ac 2688 kprintf("%s: could not read firmware error log\n",
2689 __func__);
2690 return;
2691 }
2692 /* Read firmware error log from SRAM. */
2693 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2694 sizeof (dump) / sizeof (uint32_t));
2695 iwn_nic_unlock(sc);
2696
2697 if (dump.valid == 0) {
3db796ac 2698 kprintf("%s: firmware error log is empty\n",
2699 __func__);
2700 return;
2701 }
2702 kprintf("firmware error log:\n");
2703 kprintf(" error type = \"%s\" (0x%08X)\n",
2704 (dump.id < nitems(iwn_fw_errmsg)) ?
2705 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2706 dump.id);
2707 kprintf(" program counter = 0x%08X\n", dump.pc);
2708 kprintf(" source line = 0x%08X\n", dump.src_line);
2709 kprintf(" error data = 0x%08X%08X\n",
ffd7c74a 2710 dump.error_data[0], dump.error_data[1]);
3db796ac 2711 kprintf(" branch link = 0x%08X%08X\n",
ffd7c74a 2712 dump.branch_link[0], dump.branch_link[1]);
3db796ac 2713 kprintf(" interrupt link = 0x%08X%08X\n",
ffd7c74a 2714 dump.interrupt_link[0], dump.interrupt_link[1]);
3db796ac 2715 kprintf(" time = %u\n", dump.time[0]);
2716
2717 /* Dump driver status (TX and RX rings) while we're here. */
3db796ac 2718 kprintf("driver status:\n");
2719 for (i = 0; i < hal->ntxqs; i++) {
2720 struct iwn_tx_ring *ring = &sc->txq[i];
3db796ac 2721 kprintf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2722 i, ring->qid, ring->cur, ring->queued);
2723 }
3db796ac 2724 kprintf(" rx ring: cur=%d\n", sc->rxq.cur);
2725}
2726
2727static void
2728iwn_intr(void *arg)
2729{
2730 struct iwn_softc *sc = arg;
2731 struct ifnet *ifp = sc->sc_ifp;
2732 uint32_t r1, r2, tmp;
2733
2734 /* Disable interrupts. */
2735 IWN_WRITE(sc, IWN_INT_MASK, 0);
2736
2737 /* Read interrupts from ICT (fast) or from registers (slow). */
2738 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2739 tmp = 0;
2740 while (sc->ict[sc->ict_cur] != 0) {
2741 tmp |= sc->ict[sc->ict_cur];
2742 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2743 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2744 }
2745 tmp = le32toh(tmp);
2746 if (tmp == 0xffffffff) /* Shouldn't happen. */
2747 tmp = 0;
2748 else if (tmp & 0xc0000) /* Work around a HW bug. */
2749 tmp |= 0x8000;
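		/*
		 * Rebuild the interrupt cause bits from the compacted ICT
		 * value: the low byte maps to bits 0-7 and the high byte to
		 * bits 24-31 of the regular INT register layout.
		 */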
2750 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2751 r2 = 0; /* Unused. */
2752 } else {
2753 r1 = IWN_READ(sc, IWN_INT);
2754 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2755 return; /* Hardware gone! */
2756 r2 = IWN_READ(sc, IWN_FH_INT);
2757 }
2758
2759 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2760
2761 if (r1 == 0 && r2 == 0)
2762 goto done; /* Interrupt not for us. */
2763
2764 /* Acknowledge interrupts. */
2765 IWN_WRITE(sc, IWN_INT, r1);
2766 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2767 IWN_WRITE(sc, IWN_FH_INT, r2);
2768
2769 if (r1 & IWN_INT_RF_TOGGLED) {
2770 iwn_rftoggle_intr(sc);
2771 goto done;
2772 }
2773 if (r1 & IWN_INT_CT_REACHED) {
2774 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2775 __func__);
2776 }
2777 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2778 iwn_fatal_intr(sc);
2779 ifp->if_flags &= ~IFF_UP;
2780 iwn_stop_locked(sc);
2781 goto done;
2782 }
2783 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2784 (r2 & IWN_FH_INT_RX)) {
2785 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2786 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2787 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2788 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2789 IWN_INT_PERIODIC_DIS);
2790 iwn_notif_intr(sc);
2791 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2792 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2793 IWN_INT_PERIODIC_ENA);
2794 }
2795 } else
2796 iwn_notif_intr(sc);
2797 }
2798
2799 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2800 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2801 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2802 wakeup(sc); /* FH DMA transfer completed. */
2803 }
2804
2805 if (r1 & IWN_INT_ALIVE)
2806 wakeup(sc); /* Firmware is alive. */
2807
2808 if (r1 & IWN_INT_WAKEUP)
2809 iwn_wakeup_intr(sc);
2810
2811done:
2812 /* Re-enable interrupts. */
2813 if (ifp->if_flags & IFF_UP)
2814 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2815
2816}
2817
2818/*
2819 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2820 * 5000 adapters use a slightly different format).
2821 */
2822static void
2823iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2824 uint16_t len)
2825{
2826 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2827
2828 *w = htole16(len + 8);
2829 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2830 BUS_DMASYNC_PREWRITE);
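	/*
	 * Entries at the start of the scheduler window are mirrored past
	 * the end of the ring so the hardware can read a contiguous
	 * window without wrapping.
	 */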
2831 if (idx < IWN_SCHED_WINSZ) {
2832 *(w + IWN_TX_RING_COUNT) = *w;
2833 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2834 BUS_DMASYNC_PREWRITE);
2835 }
2836}
2837
2838static void
2839iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2840 uint16_t len)
2841{
2842 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2843
2844 *w = htole16(id << 12 | (len + 8));
2845
2846 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2847 BUS_DMASYNC_PREWRITE);
2848 if (idx < IWN_SCHED_WINSZ) {
2849 *(w + IWN_TX_RING_COUNT) = *w;
2850 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2851 BUS_DMASYNC_PREWRITE);
2852 }
2853}
2854
2855#ifdef notyet
2856static void
2857iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2858{
2859 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2860
2861 *w = (*w & htole16(0xf000)) | htole16(1);
2862 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2863 BUS_DMASYNC_PREWRITE);
2864 if (idx < IWN_SCHED_WINSZ) {
2865 *(w + IWN_TX_RING_COUNT) = *w;
2866 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2867 BUS_DMASYNC_PREWRITE);
2868 }
2869}
2870#endif
2871
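/*
 * Map an 802.11 rate (in the 500 kb/s units used by net80211 and by
 * iwn_rates[].rate) to the driver's rate table index; fall back to
 * index 0 if the rate is not found.
 */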
2872static uint8_t
2873iwn_plcp_signal(int rate) {
2874 int i;
2875
2876 for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
2877 if (rate == iwn_rates[i].rate)
2878 return i;
2879 }
2880
2881 return 0;
2882}
2883
2884static int
2885iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
2886 struct iwn_tx_ring *ring)
2887{
2888 const struct iwn_hal *hal = sc->sc_hal;
2889 const struct ieee80211_txparam *tp;
2890 const struct iwn_rate *rinfo;
2891 struct ieee80211vap *vap = ni->ni_vap;
2892 struct ieee80211com *ic = ni->ni_ic;
2893 struct iwn_node *wn = (void *)ni;
2894 struct iwn_tx_desc *desc;
2895 struct iwn_tx_data *data;
2896 struct iwn_tx_cmd *cmd;
2897 struct iwn_cmd_data *tx;
2898 struct ieee80211_frame *wh;
2899 struct ieee80211_key *k = NULL;
2900 struct mbuf *mnew;
2901 bus_dma_segment_t segs[IWN_MAX_SCATTER];
2902 uint32_t flags;
2903 u_int hdrlen;
2904 int totlen, error, pad, nsegs = 0, i, rate;
2905 uint8_t ridx, type, txant;
2906
2907 IWN_LOCK_ASSERT(sc);
2908
2909 wh = mtod(m, struct ieee80211_frame *);
2910 hdrlen = ieee80211_anyhdrsize(wh);
2911 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2912
2913 desc = &ring->desc[ring->cur];
2914 data = &ring->data[ring->cur];
2915
2916 /* Choose a TX rate index. */
2917 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
2918 if (type == IEEE80211_FC0_TYPE_MGT)
2919 rate = tp->mgmtrate;
2920 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2921 rate = tp->mcastrate;
2922 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2923 rate = tp->ucastrate;
2924 else {
2925 /* XXX pass pktlen */
2926 ieee80211_ratectl_rate(ni, NULL, 0);
2927
2928 rate = ni->ni_txrate;
2929 }
2930 ridx = iwn_plcp_signal(rate);
2931 rinfo = &iwn_rates[ridx];
2932
2933 /* Encrypt the frame if need be. */
2934 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2935 k = ieee80211_crypto_encap(ni, m);
2936 if (k == NULL) {
2937 m_freem(m);
2938 return ENOBUFS;
2939 }
2940 /* Packet header may have moved, reset our local pointer. */
2941 wh = mtod(m, struct ieee80211_frame *);
2942 }
2943 totlen = m->m_pkthdr.len;
2944
2945 if (ieee80211_radiotap_active_vap(vap)) {
2946 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2947
2948 tap->wt_flags = 0;
2949 tap->wt_rate = rinfo->rate;
2950 if (k != NULL)
2951 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2952
2953 ieee80211_radiotap_tx(vap, m);
2954 }
2955
2956 /* Prepare TX firmware command. */
2957 cmd = &ring->cmd[ring->cur];
2958 cmd->code = IWN_CMD_TX_DATA;
2959 cmd->flags = 0;
2960 cmd->qid = ring->qid;
2961 cmd->idx = ring->cur;
2962
2963 tx = (struct iwn_cmd_data *)cmd->data;
2964 /* NB: No need to clear tx, all fields are reinitialized here. */
2965 tx->scratch = 0; /* clear "scratch" area */
2966
2967 flags = 0;
2968 if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2969 flags |= IWN_TX_NEED_ACK;
2970 if ((wh->i_fc[0] &
2971 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2972 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2973 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
2974
2975 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2976 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
2977
2978 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2979 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2980 /* NB: Group frames are sent using CCK in 802.11b/g. */
2981 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2982 flags |= IWN_TX_NEED_RTS;
2983 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2984 ridx >= IWN_RIDX_OFDM6) {
2985 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2986 flags |= IWN_TX_NEED_CTS;
2987 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2988 flags |= IWN_TX_NEED_RTS;
2989 }
2990 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2991 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2992 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2993 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2994 flags |= IWN_TX_NEED_PROTECTION;
2995 } else
2996 flags |= IWN_TX_FULL_TXOP;
2997 }
2998 }
2999
3000 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3001 type != IEEE80211_FC0_TYPE_DATA)
3002 tx->id = hal->broadcast_id;
3003 else
3004 tx->id = wn->id;
3005
3006 if (type == IEEE80211_FC0_TYPE_MGT) {
3007 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3008
3009 /* Tell HW to set timestamp in probe responses. */
3010 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3011 flags |= IWN_TX_INSERT_TSTAMP;
3012
3013 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3014 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3015 tx->timeout = htole16(3);
3016 else
3017 tx->timeout = htole16(2);
3018 } else
3019 tx->timeout = htole16(0);
3020
3021 if (hdrlen & 3) {
3022 /* First segment length must be a multiple of 4. */
3023 flags |= IWN_TX_NEED_PADDING;
3024 pad = 4 - (hdrlen & 3);
3025 } else
3026 pad = 0;
3027
3028 tx->len = htole16(totlen);
3029 tx->tid = 0;
3030 tx->rts_ntries = 60;
3031 tx->data_ntries = 15;
3032 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3033 tx->plcp = rinfo->plcp;
3034 tx->rflags = rinfo->flags;
3035 if (tx->id == hal->broadcast_id) {
3036 /* Group or management frame. */
3037 tx->linkq = 0;
3038 /* XXX Alternate between antenna A and B? */
3039 txant = IWN_LSB(sc->txchainmask);
3040 tx->rflags |= IWN_RFLAG_ANT(txant);
3041 } else {
3042 tx->linkq = 0;
3043 flags |= IWN_TX_LINKQ; /* enable MRR */
3044 }
3045
3046 /* Set physical address of "scratch area". */
3047 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3048 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3049
3050 /* Copy 802.11 header in TX command. */
3051 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3052
3053 /* Trim 802.11 header. */
3054 m_adj(m, hdrlen);
3055 tx->security = 0;
3056 tx->flags = htole32(flags);
3057
3058 if (m->m_len > 0) {
3059 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map,
3060 m, segs, IWN_MAX_SCATTER - 1, &nsegs, BUS_DMA_NOWAIT);
3061 if (error == EFBIG) {
3062 /* too many fragments, linearize */
3db796ac 3063 mnew = m_defrag(m, MB_DONTWAIT);
3064 if (mnew == NULL) {
3065 device_printf(sc->sc_dev,
3066 "%s: could not defrag mbuf\n", __func__);
3067 m_freem(m);
3068 return ENOBUFS;
3069 }
3070 m = mnew;
3071 error = bus_dmamap_load_mbuf_segment(ring->data_dmat,
3072 data->map, m, segs, IWN_MAX_SCATTER - 1, &nsegs, BUS_DMA_NOWAIT);
3073 }
3074 if (error != 0) {
3075 device_printf(sc->sc_dev,
3db796ac 3076 "%s: bus_dmamap_load_mbuf_segment failed, error %d\n",
3077 __func__, error);
3078 m_freem(m);
3079 return error;
3080 }
3081 }
3082
3083 data->m = m;
3084 data->ni = ni;
3085
3086 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3087 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3088
3089 /* Fill TX descriptor. */
3090 desc->nsegs = 1 + nsegs;
3091 /* First DMA segment is used by the TX command. */
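	/*
	 * NB: each descriptor segment packs the upper 4 physical address
	 * bits in the low nibble of the length word and the byte count in
	 * the upper 12 bits, hence the "| ... << 4" encoding here and in
	 * the payload loop below.
	 */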
3092 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3093 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3094 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3095 /* Other DMA segments are for data payload. */
3096 for (i = 1; i <= nsegs; i++) {
3097 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3098 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3099 segs[i - 1].ds_len << 4);
3100 }
3101
3102 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3103 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3104 BUS_DMASYNC_PREWRITE);
3105 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3106 BUS_DMASYNC_PREWRITE);
3107
3108#ifdef notyet
3109 /* Update TX scheduler. */
3110 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3111#endif
3112
3113 /* Kick TX ring. */
3114 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3115 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3116
3117 /* Mark TX ring as full if we reach a certain threshold. */
3118 if (++ring->queued > IWN_TX_RING_HIMARK)
3119 sc->qfullmsk |= 1 << ring->qid;
3120
3121 return 0;
3122}
3123
3124static int
3125iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3126 struct ieee80211_node *ni, struct iwn_tx_ring *ring,
3127 const struct ieee80211_bpf_params *params)
3128{
3129 const struct iwn_hal *hal = sc->sc_hal;
3130 const struct iwn_rate *rinfo;
3131 struct ifnet *ifp = sc->sc_ifp;
3132 struct ieee80211vap *vap = ni->ni_vap;
3133 struct ieee80211com *ic = ifp->if_l2com;
3134 struct iwn_tx_cmd *cmd;
3135 struct iwn_cmd_data *tx;
3136 struct ieee80211_frame *wh;
3137 struct iwn_tx_desc *desc;
3138 struct iwn_tx_data *data;
3139 struct mbuf *mnew;
3140 bus_addr_t paddr;
3141 bus_dma_segment_t segs[IWN_MAX_SCATTER];
3142 uint32_t flags;
3143 u_int hdrlen;
3144 int totlen, error, pad, nsegs = 0, i, rate;
3145 uint8_t ridx, type, txant;
3146
3147 IWN_LOCK_ASSERT(sc);
3148
3149 wh = mtod(m, struct ieee80211_frame *);
3150 hdrlen = ieee80211_anyhdrsize(wh);
3151 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3152
3153 desc = &ring->desc[ring->cur];
3154 data = &ring->data[ring->cur];
3155
3156 /* Choose a TX rate index. */
3157 rate = params->ibp_rate0;
3158 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3159 /* XXX fall back to mcast/mgmt rate? */
3160 m_freem(m);
3161 return EINVAL;
3162 }
3163 ridx = iwn_plcp_signal(rate);
3164 rinfo = &iwn_rates[ridx];
3165
3166 totlen = m->m_pkthdr.len;
3167
3168 /* Prepare TX firmware command. */
3169 cmd = &ring->cmd[ring->cur];
3170 cmd->code = IWN_CMD_TX_DATA;
3171 cmd->flags = 0;
3172 cmd->qid = ring->qid;
3173 cmd->idx = ring->cur;
3174
3175 tx = (struct iwn_cmd_data *)cmd->data;
3176 /* NB: No need to clear tx, all fields are reinitialized here. */
3177 tx->scratch = 0; /* clear "scratch" area */
3178
3179 flags = 0;
3180 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3181 flags |= IWN_TX_NEED_ACK;
3182 if (params->ibp_flags & IEEE80211_BPF_RTS) {
3183 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3184 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3185 flags &= ~IWN_TX_NEED_RTS;
3186 flags |= IWN_TX_NEED_PROTECTION;
3187 } else
3188 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3189 }
3190 if (params->ibp_flags & IEEE80211_BPF_CTS) {
3191 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3192 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3193 flags &= ~IWN_TX_NEED_CTS;
3194 flags |= IWN_TX_NEED_PROTECTION;
3195 } else
3196 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3197 }
3198 if (type == IEEE80211_FC0_TYPE_MGT) {
3199 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3200
3201 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3202 flags |= IWN_TX_INSERT_TSTAMP;
3203
3204 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3205 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3206 tx->timeout = htole16(3);
3207 else
3208 tx->timeout = htole16(2);
3209 } else
3210 tx->timeout = htole16(0);
3211
3212 if (hdrlen & 3) {
3213 /* First segment length must be a multiple of 4. */
3214 flags |= IWN_TX_NEED_PADDING;
3215 pad = 4 - (hdrlen & 3);
3216 } else
3217 pad = 0;
3218
3219 if (ieee80211_radiotap_active_vap(vap)) {
3220 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3221
3222 tap->wt_flags = 0;
3223 tap->wt_rate = rate;
3224
3225 ieee80211_radiotap_tx(vap, m);
3226 }
3227
3228 tx->len = htole16(totlen);
3229 tx->tid = 0;
3230 tx->id = hal->broadcast_id;
3231 tx->rts_ntries = params->ibp_try1;
3232 tx->data_ntries = params->ibp_try0;
3233 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3234 tx->plcp = rinfo->plcp;
3235 tx->rflags = rinfo->flags;
3236 /* Group or management frame. */
3237 tx->linkq = 0;
3238 txant = IWN_LSB(sc->txchainmask);
3239 tx->rflags |= IWN_RFLAG_ANT(txant);
3240 /* Set physical address of "scratch area". */
3241 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
3242 tx->loaddr = htole32(IWN_LOADDR(paddr));
3243 tx->hiaddr = IWN_HIADDR(paddr);
3244
3245 /* Copy 802.11 header in TX command. */
3246 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3247
3248 /* Trim 802.11 header. */
3249 m_adj(m, hdrlen);
3250 tx->security = 0;
3251 tx->flags = htole32(flags);
3252
3253 if (m->m_len > 0) {
3254 error = bus_dmamap_load_mbuf_segment(ring->data_dmat, data->map,
3255 m, segs, IWN_MAX_SCATTER - 1, &nsegs, BUS_DMA_NOWAIT);
3256 if (error == EFBIG) {
3257 /* Too many fragments, linearize. */
3db796ac 3258 mnew = m_defrag(m, MB_DONTWAIT);
3259 if (mnew == NULL) {
3260 device_printf(sc->sc_dev,
3261 "%s: could not defrag mbuf\n", __func__);
3262 m_freem(m);
3263 return ENOBUFS;
3264 }
3265 m = mnew;
3266 error = bus_dmamap_load_mbuf_segment(ring->data_dmat,
3267 data->map, m, segs, IWN_MAX_SCATTER - 1, &nsegs, BUS_DMA_NOWAIT);
3268 }
3269 if (error != 0) {
3270 device_printf(sc->sc_dev,
3db796ac 3271 "%s: bus_dmamap_load_mbuf_segment failed, error %d\n",
3272 __func__, error);
3273 m_freem(m);
3274 return error;
3275 }
3276 }
3277
3278 data->m = m;
3279 data->ni = ni;
3280
3281 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3282 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3283
3284 /* Fill TX descriptor. */
3285 desc->nsegs = 1 + nsegs;
3286 /* First DMA segment is used by the TX command. */
3287 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3288 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3289 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3290 /* Other DMA segments are for data payload. */
3291 for (i = 1; i <= nsegs; i++) {
3292 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3293 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3294 segs[i - 1].ds_len << 4);
3295 }
3296
3297 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3298 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3299 BUS_DMASYNC_PREWRITE);
3300 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3301 BUS_DMASYNC_PREWRITE);
3302
3303#ifdef notyet
3304 /* Update TX scheduler. */
3305 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3306#endif
3307
3308 /* Kick TX ring. */
3309 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3310 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3311
3312 /* Mark TX ring as full if we reach a certain threshold. */
3313 if (++ring->queued > IWN_TX_RING_HIMARK)
3314 sc->qfullmsk |= 1 << ring->qid;
3315
3316 return 0;
3317}
3318
3319static int
3320iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3321 const struct ieee80211_bpf_params *params)
3322{
3323 struct ieee80211com *ic = ni->ni_ic;
3324 struct ifnet *ifp = ic->ic_ifp;
3325 struct iwn_softc *sc = ifp->if_softc;
3326 struct iwn_tx_ring *txq;
3327 int error = 0;
3328
3db796ac 3329 if ((ifp->if_flags & IFF_RUNNING) == 0) {
3330 ieee80211_free_node(ni);
3331 m_freem(m);
3332 return ENETDOWN;
3333 }
3334
3335 IWN_LOCK(sc);
3336 if (params == NULL)
3337 txq = &sc->txq[M_WME_GETAC(m)];
3338 else
3339 txq = &sc->txq[params->ibp_pri & 3];
3340
3341 if (params == NULL) {
3342 /*
3343 * Legacy path; interpret frame contents to decide
3344 * precisely how to send the frame.
3345 */
3346 error = iwn_tx_data(sc, m, ni, txq);
3347 } else {
3348 /*
3349 * Caller supplied explicit parameters to use in
3350 * sending the frame.
3351 */
3352 error = iwn_tx_data_raw(sc, m, ni, txq, params);
3353 }
3354 if (error != 0) {
3355 /* NB: m is reclaimed on tx failure */
3356 ieee80211_free_node(ni);
3357 ifp->if_oerrors++;
3358 }
3359 IWN_UNLOCK(sc);
3360 return error;
3361}
3362
3363static void
3364iwn_start(struct ifnet *ifp)
3365{
3366 struct iwn_softc *sc = ifp->if_softc;
3367
3368 IWN_LOCK_ASSERT(sc);
3369
ffd7c74a 3370 iwn_start_locked(ifp);
3371}
3372
3373static void
3374iwn_start_locked(struct ifnet *ifp)
3375{
3376 struct iwn_softc *sc = ifp->if_softc;
3377 struct ieee80211_node *ni;
3378 struct iwn_tx_ring *txq;
3379 struct mbuf *m;
3380 int pri;
3381
3382 IWN_LOCK_ASSERT(sc);
3383
3384 for (;;) {
3385 if (sc->qfullmsk != 0) {
3db796ac 3386 ifp->if_flags |= IFF_OACTIVE;
3387 break;
3388 }
3db796ac 3389 m = ifq_dequeue(&ifp->if_snd, NULL);
3390 if (m == NULL)
3391 break;
3392 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3393 pri = M_WME_GETAC(m);
3394 txq = &sc->txq[pri];
3395 if (iwn_tx_data(sc, m, ni, txq) != 0) {
3396 ifp->if_oerrors++;
3397 ieee80211_free_node(ni);
3398 break;
3399 }
3400 sc->sc_tx_timer = 5;
3401 }
3402}
3403
3404static void
3405iwn_watchdog(struct iwn_softc *sc)
3406{
3407 if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
3408 struct ifnet *ifp = sc->sc_ifp;
3409 struct ieee80211com *ic = ifp->if_l2com;
3410
3411 if_printf(ifp, "device timeout\n");
3412 ieee80211_runtask(ic, &sc->sc_reinit_task);
3413 }
3414}
3415
3416static int
3db796ac 3417iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *ucred)
3418{
3419 struct iwn_softc *sc = ifp->if_softc;
3420 struct ieee80211com *ic = ifp->if_l2com;
3421 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3422 struct ifreq *ifr = (struct ifreq *) data;
3423 int error = 0, startall = 0, stop = 0;
3424
3425 switch (cmd) {
3426 case SIOCSIFFLAGS:
3427 IWN_LOCK(sc);
3428 if (ifp->if_flags & IFF_UP) {
3db796ac 3429 if (!(ifp->if_flags & IFF_RUNNING)) {
3430 iwn_init_locked(sc);
3431 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3432 startall = 1;
3433 else
3434 stop = 1;
3435 }
3436 } else {
3db796ac 3437 if (ifp->if_flags & IFF_RUNNING)
3438 iwn_stop_locked(sc);
3439 }
3440 IWN_UNLOCK(sc);
3441 if (startall)
3442 ieee80211_start_all(ic);
3443 else if (vap != NULL && stop)
3444 ieee80211_stop(vap);
3445 break;
3446 case SIOCGIFMEDIA:
3447 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3448 break;
3449 case SIOCGIFADDR:
3450 error = ether_ioctl(ifp, cmd, data);
3451 break;
3452 default:
3453 error = EINVAL;
3454 break;
3455 }
3456 return error;
3457}
3458
3459/*
3460 * Send a command to the firmware.
3461 */
3462static int
3463iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3464{
3465 struct iwn_tx_ring *ring = &sc->txq[4];
3466 struct iwn_tx_desc *desc;
3467 struct iwn_tx_data *data;
3468 struct iwn_tx_cmd *cmd;
3469 struct mbuf *m;
3470 bus_addr_t paddr;
3471 int totlen, error;
3472
3473 IWN_LOCK_ASSERT(sc);
3474
3475 desc = &ring->desc[ring->cur];
3476 data = &ring->data[ring->cur];
3477 totlen = 4 + size;
3478
3479 if (size > sizeof cmd->data) {
3480 /* Command is too large to fit in a descriptor. */
3481 if (totlen > MCLBYTES)
3482 return EINVAL;
3db796ac 3483 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3484 if (m == NULL)
3485 return ENOMEM;
3486 cmd = mtod(m, struct iwn_tx_cmd *);
3487 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3488 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3489 if (error != 0) {
3490 m_freem(m);
3491 return error;
3492 }
3493 data->m = m;
3494 } else {
3495 cmd = &ring->cmd[ring->cur];
3496 paddr = data->cmd_paddr;
3497 }
3498
3499 cmd->code = code;
3500 cmd->flags = 0;
3501 cmd->qid = ring->qid;
3502 cmd->idx = ring->cur;
3503 memcpy(cmd->data, buf, size);
3504
3505 desc->nsegs = 1;
3506 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3507 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3508
3509 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3510 __func__, iwn_intr_str(cmd->code), cmd->code,
3511 cmd->flags, cmd->qid, cmd->idx);
3512
3513 if (size > sizeof cmd->data) {
3514 bus_dmamap_sync(ring->data_dmat, data->map,
3515 BUS_DMASYNC_PREWRITE);
3516 } else {
3517 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3518 BUS_DMASYNC_PREWRITE);
3519 }
3520 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3521 BUS_DMASYNC_PREWRITE);
3522
3523#ifdef notyet
3524 /* Update TX scheduler. */
3525 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0);
3526#endif
3527
3528 /* Kick command ring. */
3529 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3530 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3531
3db796ac 3532 return async ? 0 : zsleep(desc, sc->sc_ifp->if_serializer, 0, "iwncmd", hz);
3533}
3534
3535static int
3536iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3537{
3538 struct iwn4965_node_info hnode;
3539 caddr_t src, dst;
3540
3541 /*
3542 * We use the node structure for 5000 Series internally (it is
3543 * a superset of the one for 4965AGN). We thus copy the common
3544 * fields before sending the command.
3545 */
3546 src = (caddr_t)node;
3547 dst = (caddr_t)&hnode;
3548 memcpy(dst, src, 48);
3549 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3550 memcpy(dst + 48, src + 72, 20);
3551 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3552}
3553
3554static int
3555iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3556{
3557 /* Direct mapping. */
3558 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3559}
3560
3561#if 0 /* HT */
3562static const uint8_t iwn_ridx_to_plcp[] = {
3563 10, 20, 55, 110, /* CCK */
3564 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */
3565};
3566static const uint8_t iwn_siso_mcs_to_plcp[] = {
3567 0, 0, 0, 0, /* CCK */
3568 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */
3569};
3570static const uint8_t iwn_mimo_mcs_to_plcp[] = {
3571 0, 0, 0, 0, /* CCK */
3572 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */
3573};
3574#endif
3575static const uint8_t iwn_prev_ridx[] = {
3576 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */
3577 0, 0, 1, 5, /* CCK */
3578 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */
3579};
3580
3581/*
3582 * Configure hardware link parameters for the specified
3583 * node operating on the specified channel.
3584 */
3585static int
3586iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async)
3587{
3588 struct ifnet *ifp = sc->sc_ifp;
3589 struct ieee80211com *ic = ifp->if_l2com;
3590 struct iwn_cmd_link_quality linkq;
3591 const struct iwn_rate *rinfo;
3592 int i;
3593 uint8_t txant, ridx;
3594
3595 /* Use the first valid TX antenna. */
3596 txant = IWN_LSB(sc->txchainmask);
3597
3598 memset(&linkq, 0, sizeof linkq);
3599 linkq.id = id;
3600 linkq.antmsk_1stream = txant;
3601 linkq.antmsk_2stream = IWN_ANT_AB;
3602 linkq.ampdu_max = 31;
3603 linkq.ampdu_threshold = 3;
3604 linkq.ampdu_limit = htole16(4000); /* 4ms */
3605
3606#if 0 /* HT */
3607 if (IEEE80211_IS_CHAN_HT(c))
3608 linkq.mimo = 1;
3609#endif
3610
3611 if (id == IWN_ID_BSS)
3612 ridx = IWN_RIDX_OFDM54;
3613 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
3614 ridx = IWN_RIDX_OFDM6;
3615 else
3616 ridx = IWN_RIDX_CCK1;
3617
3618 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3619 rinfo = &iwn_rates[ridx];
3620#if 0 /* HT */
3621 if (IEEE80211_IS_CHAN_HT40(c)) {
3622 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx]
3623 | IWN_RIDX_MCS;
3624 linkq.retry[i].rflags = IWN_RFLAG_HT
3625 | IWN_RFLAG_HT40;
3626 /* XXX shortGI */
3627 } else if (IEEE80211_IS_CHAN_HT(c)) {
3628 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx]
3629 | IWN_RIDX_MCS;
3630 linkq.retry[i].rflags = IWN_RFLAG_HT;
3631 /* XXX shortGI */
3632 } else
3633#endif
3634 {
3635 linkq.retry[i].plcp = rinfo->plcp;
3636 linkq.retry[i].rflags = rinfo->flags;
3637 }
3638 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3639 ridx = iwn_prev_ridx[ridx];
3640 }
3641#ifdef IWN_DEBUG
3642 if (sc->sc_debug & IWN_DEBUG_STATE) {
3643 kprintf("%s: set link quality for node %d, mimo %d ssmask %d\n",
3644 __func__, id, linkq.mimo, linkq.antmsk_1stream);
3645 kprintf("%s:", __func__);
3646 for (i = 0; i < IWN_MAX_TX_RETRIES; i++)
3647 kprintf(" %d:%x", linkq.retry[i].plcp,
3648 linkq.retry[i].rflags);
3649 kprintf("\n");
3650 }
3651#endif
3652 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3653}
3654
3655/*
3656 * Broadcast node is used to send group-addressed and management frames.
3657 */
3658static int
3659iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3660{
3661 const struct iwn_hal *hal = sc->sc_hal;
3662 struct ifnet *ifp = sc->sc_ifp;
3663 struct iwn_node_info node;
3664 int error;
3665
3666 memset(&node, 0, sizeof node);
3667 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3668 node.id = hal->broadcast_id;
3669 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3670 error = hal->add_node(sc, &node, async);
3671 if (error != 0)
3672 return error;
3673
3674 error = iwn_set_link_quality(sc, hal->broadcast_id, async);
3675 return error;
3676}
3677
3678static int
3679iwn_wme_update(struct ieee80211com *ic)
3680{
3681#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
3682#define IWN_TXOP_TO_US(v) (v<<5)
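/* NB: wmep_txopLimit is expressed in 32-usec units, so "v << 5" (x32) yields usecs. */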
3683 struct iwn_softc *sc = ic->ic_ifp->if_softc;
3684 struct iwn_edca_params cmd;
3685 int i;
3686
3687 memset(&cmd, 0, sizeof cmd);
3688 cmd.flags = htole32(IWN_EDCA_UPDATE);
3689 for (i = 0; i < WME_NUM_AC; i++) {
3690 const struct wmeParams *wmep =
3691 &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
3692 cmd.ac[i].aifsn = wmep->wmep_aifsn;
3693 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin));
3694 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax));
3695 cmd.ac[i].txoplimit =
3696 htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit));
3697 }
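	/*
	 * Swap locks so the 802.11 com lock is never held across the
	 * driver lock while the EDCA command is submitted (asynchronously).
	 */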
3698 IEEE80211_UNLOCK(ic);
3699 IWN_LOCK(sc);
3700 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/);
3701 IWN_UNLOCK(sc);
3702 IEEE80211_LOCK(ic);
3703 return 0;
3704#undef IWN_TXOP_TO_US
3705#undef IWN_EXP2
3706}
3707
3708static void
3709iwn_update_mcast(struct ifnet *ifp)
3710{
3711 /* Ignore */
3712}
3713
3714static void
3715iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3716{
3717 struct iwn_cmd_led led;
3718
3719 /* Clear microcode LED ownership. */
3720 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3721
3722 led.which = which;
3723 led.unit = htole32(10000); /* on/off in unit of 100ms */
3724 led.off = off;
3725 led.on = on;
3726 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3727}
3728
3729/*
3730 * Set the critical temperature at which the firmware will stop the radio
3731 * and notify us.
3732 */
3733static int
3734iwn_set_critical_temp(struct iwn_softc *sc)
3735{
3736 struct iwn_critical_temp crit;
3737 int32_t temp;
3738
3739 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3740
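	/*
	 * The firmware expects the threshold in chip-specific units: the
	 * 5150's thermal sensor uses an offset, inverted scale (hence the
	 * temp_off and *-5 translation below), the 4965 takes Kelvin, and
	 * the other parts take Celsius.  (The 5150 interpretation is a
	 * best guess from the conversion used here.)
	 */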
3741 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3742 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3743 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3744 temp = IWN_CTOK(110);
3745 else
3746 temp = 110;
3747 memset(&crit, 0, sizeof crit);
3748 crit.tempR = htole32(temp);
3749 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n",
3750 temp);
3751 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3752}
3753
3754static int
3755iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3756{
3757 struct iwn_cmd_timing cmd;
3758 uint64_t val, mod;
3759
3760 memset(&cmd, 0, sizeof cmd);
3761 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3762 cmd.bintval = htole16(ni->ni_intval);
3763 cmd.lintval = htole16(10);
3764
3765 /* Compute remaining time until next beacon. */
3766 val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */
3767 mod = le64toh(cmd.tstamp) % val;
3768 cmd.binitval = htole32((uint32_t)(val - mod));
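	/*
	 * The TSF modulo the beacon period gives the time already elapsed
	 * in the current period, so "val - mod" is the delay until the next
	 * expected beacon.  E.g. with a 100 TU interval (102400 us) and
	 * tstamp % val == 30000, binitval is set to 72400 us.
	 */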
3769
3770 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3771 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3772
3773 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3774}
3775
3776static void
3777iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3778{
3779 struct ifnet *ifp = sc->sc_ifp;
3780 struct ieee80211com *ic = ifp->if_l2com;
3781
3782 /* Adjust TX power if need be (delta >= 3 degC.) */
3783 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3784 __func__, sc->temp, temp);
3785 if (abs(temp - sc->temp) >= 3) {
3786 /* Record temperature of last calibration. */
3787 sc->temp = temp;
3788 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3789 }
3790}
3791
3792/*
3793 * Set TX power for current channel (each rate has its own power settings).
3794 * This function takes into account the regulatory information from EEPROM,
3795 * the current temperature and the current voltage.
3796 */
3797static int
3798iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3799 int async)
3800{
3801/* Fixed-point arithmetic division using a n-bit fractional part. */
3802#define fdivround(a, b, n) \
3803 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3804/* Linear interpolation. */
3805#define interpolate(x, x1, y1, x2, y2, n) \
3806 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
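/*
 * Example (hypothetical sample points): interpolating a power sample for
 * channel 40 between reference channels 36 (power 10) and 44 (power 18)
 * with n=1 gives 10 + fdivround(4 * 8, 8, 1) = 14; the (1 << n) / 2 term
 * makes fdivround round to nearest instead of truncating.
 */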
3807
3808 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3809 struct ifnet *ifp = sc->sc_ifp;
3810 struct ieee80211com *ic = ifp->if_l2com;
3811 struct iwn_ucode_info *uc = &sc->ucode_info;
3812 struct iwn4965_cmd_txpower cmd;
3813 struct iwn4965_eeprom_chan_samples *chans;
3814 int32_t vdiff, tdiff;
3815 int i, c, grp, maxpwr;
3816 const uint8_t *rf_gain, *dsp_gain;
3817 uint8_t chan;
3818
3819 /* Retrieve channel number. */
3820 chan = ieee80211_chan2ieee(ic, ch);
3821 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
3822 chan);
3823
3824 memset(&cmd, 0, sizeof cmd);
3825 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3826 cmd.chan = chan;
3827
3828 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3829 maxpwr = sc->maxpwr5GHz;
3830 rf_gain = iwn4965_rf_gain_5ghz;
3831 dsp_gain = iwn4965_dsp_gain_5ghz;
3832 } else {
3833 maxpwr = sc->maxpwr2GHz;
3834 rf_gain = iwn4965_rf_gain_2ghz;
3835 dsp_gain = iwn4965_dsp_gain_2ghz;
3836 }
3837
3838 /* Compute voltage compensation. */
3839 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3840 if (vdiff > 0)
3841 vdiff *= 2;
3842 if (abs(vdiff) > 2)
3843 vdiff = 0;
3844 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3845 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3846 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
3847
3848 /* Get channel attenuation group. */
3849 if (chan <= 20) /* 1-20 */
3850 grp = 4;
3851 else if (chan <= 43) /* 34-43 */
3852 grp = 0;
3853 else if (chan <= 70) /* 44-70 */
3854 grp = 1;
3855 else if (chan <= 124) /* 71-124 */
3856 grp = 2;
3857 else /* 125-200 */
3858 grp = 3;
3859 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3860 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
3861
3862 /* Get channel sub-band. */
3863 for (i = 0; i < IWN_NBANDS; i++)
3864 if (sc->bands[i].lo != 0 &&
3865 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3866 break;
3867 if (i == IWN_NBANDS) /* Can't happen in real-life. */
3868 return EINVAL;
3869 chans = sc->bands[i].chans;
3870 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3871 "%s: chan %d sub-band=%d\n", __func__, chan, i);
3872
3873 for (c = 0; c < 2; c++) {
3874 uint8_t power, gain, temp;
3875 int maxchpwr, pwr, ridx, idx;
3876
3877 power = interpolate(chan,
3878 chans[0].num, chans[0].samples[c][1].power,
3879 chans[1].num, chans[1].samples[c][1].power, 1);
3880 gain = interpolate(chan,
3881 chans[0].num, chans[0].samples[c][1].gain,
3882 chans[1].num, chans[1].samples[c][1].gain, 1);
3883 temp = interpolate(chan,
3884 chans[0].num, chans[0].samples[c][1].temp,
3885 chans[1].num, chans[1].samples[c][1].temp, 1);
3886 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3887 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
3888 __func__, c, power, gain, temp);
3889
3890 /* Compute temperature compensation. */
3891 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3892 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3893 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
3894 __func__, tdiff, sc->temp, temp);
3895
3896 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3897 /* Convert dBm to half-dBm. */
3898 maxchpwr = sc->maxpwr[chan] * 2;
3899 if ((ridx / 8) & 1)
3900 maxchpwr -= 6; /* MIMO 2T: -3dB */
3901
3902 pwr = maxpwr;
3903
3904 /* Adjust TX power based on rate. */
3905 if ((ridx % 8) == 5)
3906 pwr -= 15; /* OFDM48: -7.5dB */
3907 else if ((ridx % 8) == 6)
3908 pwr -= 17; /* OFDM54: -8.5dB */
3909 else if ((ridx % 8) == 7)
3910 pwr -= 20; /* OFDM60: -10dB */
3911 else
3912 pwr -= 10; /* Others: -5dB */
3913
3914 /* Do not exceed channel max TX power. */
3915 if (pwr > maxchpwr)
3916 pwr = maxchpwr;
3917
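			/*
			 * Derive the gain-table index from the factory
			 * calibration sample: start from the sampled gain,
			 * subtract the difference between the requested
			 * power and the sampled power, then apply the
			 * temperature and voltage corrections computed
			 * above.  (Half-dB-per-step granularity is assumed
			 * here, matching the half-dBm power units.)
			 */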
3918 idx = gain - (pwr - power) - tdiff - vdiff;
3919 if ((ridx / 8) & 1) /* MIMO */
3920 idx += (int32_t)le32toh(uc->atten[grp][c]);
3921
3922 if (cmd.band == 0)
3923 idx += 9; /* 5GHz */
3924 if (ridx == IWN_RIDX_MAX)
3925 idx += 5; /* CCK */
3926
3927 /* Make sure idx stays in a valid range. */
3928 if (idx < 0)
3929 idx = 0;
3930 else if (idx > IWN4965_MAX_PWR_INDEX)
3931 idx = IWN4965_MAX_PWR_INDEX;
3932
3933 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3934 "%s: Tx chain %d, rate idx %d: power=%d\n",
3935 __func__, c, ridx, idx);
3936 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3937 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3938 }
3939 }
3940
3941 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3942 "%s: set tx power for chan %d\n", __func__, chan);
3943 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3944
3945#undef interpolate
3946#undef fdivround
3947}
3948
3949static int
3950iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3951 int async)
3952{
3953 struct iwn5000_cmd_txpower cmd;
3954
3955 /*
3956 * TX power calibration is handled automatically by the firmware
3957 * for 5000 Series.
3958 */
3959 memset(&cmd, 0, sizeof cmd);
3960 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
3961 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3962 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3963 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
3964 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3965}
3966
3967/*
3968 * Retrieve the maximum RSSI (in dBm) among receivers.
3969 */
3970static int
3971iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3972{
3973 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3974 uint8_t mask, agc;
3975 int rssi;
3976
3977 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3978 agc = (le16toh(phy->agc) >> 7) & 0x7f;
3979
3980 rssi = 0;
3981#if 0
3982 if (mask & IWN_ANT_A) /* Ant A */
3983 rssi = max(rssi, phy->rssi[0]);
3984 if (mask & IWN_ANT_B) /* Ant B */
3985 rssi = max(rssi, phy->rssi[2]);
3986 if (mask & IWN_ANT_C) /* Ant C */
3987 rssi = max(rssi, phy->rssi[4]);
3988#else
3989 rssi = max(rssi, phy->rssi[0]);
3990 rssi = max(rssi, phy->rssi[2]);
3991 rssi = max(rssi, phy->rssi[4]);
3992#endif
3993
3994 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
3995 "result %d\n", __func__, agc, mask,
3996 phy->rssi[0], phy->rssi[2], phy->rssi[4],
3997 rssi - agc - IWN_RSSI_TO_DBM);
3998 return rssi - agc - IWN_RSSI_TO_DBM;
3999}
4000
4001static int
4002iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4003{
4004 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4005 int rssi;
4006 uint8_t agc;
4007
4008 agc = (le32toh(phy->agc) >> 9) & 0x7f;
4009
4010 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4011 le16toh(phy->rssi[1]) & 0xff);
4012 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4013
4014 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
4015 "result %d\n", __func__, agc,
4016 phy->rssi[0], phy->rssi[1], phy->rssi[2],
4017 rssi - agc - IWN_RSSI_TO_DBM);
4018 return rssi - agc - IWN_RSSI_TO_DBM;
4019}
4020
4021/*
4022 * Retrieve the average noise (in dBm) among receivers.
4023 */
4024static int
4025iwn_get_noise(const struct iwn_rx_general_stats *stats)
4026{
4027 int i, total, nbant, noise;
4028
4029 total = nbant = 0;
4030 for (i = 0; i < 3; i++) {
4031 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4032 continue;
4033 total += noise;
4034 nbant++;
4035 }
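	/*
	 * Average over the antennas that reported a value; the 107 offset
	 * presumably maps the firmware's internal noise figure to an
	 * approximate dBm value.
	 */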
4036 /* There should be at least one antenna but check anyway. */
4037 return (nbant == 0) ? -127 : (total / nbant) - 107;
4038}
4039
4040/*
4041 * Compute temperature (in degC) from last received statistics.
4042 */
4043static int
4044iwn4965_get_temperature(struct iwn_softc *sc)
4045{
4046 struct iwn_ucode_info *uc = &sc->ucode_info;
4047 int32_t r1, r2, r3, r4, temp;
4048
4049 r1 = le32toh(uc->temp[0].chan20MHz);
4050 r2 = le32toh(uc->temp[1].chan20MHz);
4051 r3 = le32toh(uc->temp[2].chan20MHz);
4052 r4 = le32toh(sc->rawtemp);
4053
4054 if (r1 == r3) /* Prevents division by 0 (should not happen.) */
4055 return 0;
4056
4057 /* Sign-extend 23-bit R4 value to 32-bit. */
4058 r4 = (r4 << 8) >> 8;
4059 /* Compute temperature in Kelvin. */
4060 temp = (259 * (r4 - r2)) / (r3 - r1);
4061 temp = (temp * 97) / 100 + 8;
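	/*
	 * r1..r3 are factory calibration references from the ucode info and
	 * r4 is the current raw reading; the ratio maps the reading onto the
	 * calibrated Kelvin scale, and the 97/100 + 8 step is presumably a
	 * firmware-specified correction factor.
	 */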
4062