2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sepherosa Ziehau <sepherosa@gmail.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/dev/netif/bwi/if_bwi.c,v 1.2 2007/09/15 09:59:29 sephe Exp $
37 #include <sys/param.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
41 #include <sys/malloc.h>
44 #include <sys/serialize.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
49 #include <net/ethernet.h>
52 #include <net/if_arp.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/ifq_var.h>
57 #include <netproto/802_11/ieee80211_var.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pcidevs.h>
64 #include "if_bwireg.h"
65 #include "if_bwivar.h"
69 struct bwi_clock_freq {
74 struct bwi_myaddr_bssid {
75 uint8_t myaddr[IEEE80211_ADDR_LEN];
76 uint8_t bssid[IEEE80211_ADDR_LEN];
79 static int bwi_probe(device_t);
80 static int bwi_attach(device_t);
81 static int bwi_detach(device_t);
82 static int bwi_shutdown(device_t);
84 static void bwi_init(void *);
85 static int bwi_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
86 static void bwi_start(struct ifnet *);
87 static void bwi_watchdog(struct ifnet *);
88 static int bwi_newstate(struct ieee80211com *, enum ieee80211_state, int);
89 static void bwi_updateslot(struct ifnet *);
90 static int bwi_media_change(struct ifnet *);
92 static void bwi_next_scan(void *);
93 static void bwi_calibrate(void *);
95 static int bwi_stop(struct bwi_softc *);
96 static int bwi_newbuf(struct bwi_softc *, int, int);
97 static int bwi_encap(struct bwi_softc *, int, struct mbuf *,
98 struct ieee80211_node *);
100 static void bwi_init_rxdesc_ring32(struct bwi_softc *, uint32_t,
101 bus_addr_t, int, int);
102 static void bwi_reset_rx_ring32(struct bwi_softc *, uint32_t);
104 static int bwi_init_tx_ring32(struct bwi_softc *, int);
105 static int bwi_init_rx_ring32(struct bwi_softc *);
106 static int bwi_init_txstats32(struct bwi_softc *);
107 static void bwi_free_tx_ring32(struct bwi_softc *, int);
108 static void bwi_free_rx_ring32(struct bwi_softc *);
109 static void bwi_free_txstats32(struct bwi_softc *);
110 static void bwi_setup_rx_desc32(struct bwi_softc *, int, bus_addr_t, int);
111 static void bwi_setup_tx_desc32(struct bwi_softc *, struct bwi_ring_data *,
112 int, bus_addr_t, int);
113 static void bwi_rxeof32(struct bwi_softc *);
114 static void bwi_start_tx32(struct bwi_softc *, uint32_t, int);
115 static void bwi_txeof_status32(struct bwi_softc *);
117 static int bwi_init_tx_ring64(struct bwi_softc *, int);
118 static int bwi_init_rx_ring64(struct bwi_softc *);
119 static int bwi_init_txstats64(struct bwi_softc *);
120 static void bwi_free_tx_ring64(struct bwi_softc *, int);
121 static void bwi_free_rx_ring64(struct bwi_softc *);
122 static void bwi_free_txstats64(struct bwi_softc *);
123 static void bwi_setup_rx_desc64(struct bwi_softc *, int, bus_addr_t, int);
124 static void bwi_setup_tx_desc64(struct bwi_softc *, struct bwi_ring_data *,
125 int, bus_addr_t, int);
126 static void bwi_rxeof64(struct bwi_softc *);
127 static void bwi_start_tx64(struct bwi_softc *, uint32_t, int);
128 static void bwi_txeof_status64(struct bwi_softc *);
130 static void bwi_intr(void *);
131 static void bwi_rxeof(struct bwi_softc *, int);
132 static void _bwi_txeof(struct bwi_softc *, uint16_t);
133 static void bwi_txeof(struct bwi_softc *);
134 static void bwi_txeof_status(struct bwi_softc *, int);
135 static void bwi_enable_intrs(struct bwi_softc *, uint32_t);
136 static void bwi_disable_intrs(struct bwi_softc *, uint32_t);
137 static int bwi_calc_rssi(struct bwi_softc *, const struct bwi_rxbuf_hdr *);
139 static int bwi_dma_alloc(struct bwi_softc *);
140 static void bwi_dma_free(struct bwi_softc *);
141 static int bwi_dma_ring_alloc(struct bwi_softc *, bus_dma_tag_t,
142 struct bwi_ring_data *, bus_size_t,
144 static int bwi_dma_mbuf_create(struct bwi_softc *);
145 static void bwi_dma_mbuf_destroy(struct bwi_softc *, int, int);
146 static int bwi_dma_txstats_alloc(struct bwi_softc *, uint32_t, bus_size_t);
147 static void bwi_dma_txstats_free(struct bwi_softc *);
148 static void bwi_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
149 static void bwi_dma_buf_addr(void *, bus_dma_segment_t *, int,
152 static void bwi_power_on(struct bwi_softc *, int);
153 static int bwi_power_off(struct bwi_softc *, int);
154 static int bwi_set_clock_mode(struct bwi_softc *, enum bwi_clock_mode);
155 static int bwi_set_clock_delay(struct bwi_softc *);
156 static void bwi_get_clock_freq(struct bwi_softc *, struct bwi_clock_freq *);
157 static int bwi_get_pwron_delay(struct bwi_softc *sc);
158 static void bwi_set_addr_filter(struct bwi_softc *, uint16_t,
160 static void bwi_set_bssid(struct bwi_softc *, const uint8_t *);
161 static int bwi_set_chan(struct bwi_softc *, struct ieee80211_channel *);
163 static void bwi_get_card_flags(struct bwi_softc *);
164 static void bwi_get_eaddr(struct bwi_softc *, uint16_t, uint8_t *);
166 static int bwi_bus_attach(struct bwi_softc *);
167 static int bwi_bbp_attach(struct bwi_softc *);
168 static int bwi_bbp_power_on(struct bwi_softc *, enum bwi_clock_mode);
169 static void bwi_bbp_power_off(struct bwi_softc *);
171 static const char *bwi_regwin_name(const struct bwi_regwin *);
172 static uint32_t bwi_regwin_disable_bits(struct bwi_softc *);
173 static void bwi_regwin_info(struct bwi_softc *, uint16_t *, uint8_t *);
174 static int bwi_regwin_select(struct bwi_softc *, int);
176 static const struct bwi_dev {
181 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4301,
182 "Broadcom BCM4301 802.11 Wireless Lan" },
184 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4307,
185 "Broadcom BCM4307 802.11 Wireless Lan" },
187 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4311,
188 "Broadcom BCM4311 802.11 Wireless Lan" },
190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4312,
191 "Broadcom BCM4312 802.11 Wireless Lan" },
193 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4306_1,
194 "Broadcom BCM4306 802.11 Wireless Lan" },
196 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4306_2,
197 "Broadcom BCM4306 802.11 Wireless Lan" },
199 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4306_3,
200 "Broadcom BCM4306 802.11 Wireless Lan" },
202 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4309,
203 "Broadcom BCM4309 802.11 Wireless Lan" },
205 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4318,
206 "Broadcom BCM4318 802.11 Wireless Lan" },
208 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4319,
209 "Broadcom BCM4319 802.11 Wireless Lan" }
212 static device_method_t bwi_methods[] = {
213 DEVMETHOD(device_probe, bwi_probe),
214 DEVMETHOD(device_attach, bwi_attach),
215 DEVMETHOD(device_detach, bwi_detach),
216 DEVMETHOD(device_shutdown, bwi_shutdown),
218 DEVMETHOD(device_suspend, bwi_suspend),
219 DEVMETHOD(device_resume, bwi_resume),
224 static driver_t bwi_driver = {
227 sizeof(struct bwi_softc)
230 static devclass_t bwi_devclass;
232 DRIVER_MODULE(bwi, pci, bwi_driver, bwi_devclass, 0, 0);
233 DRIVER_MODULE(bwi, cardbus, bwi_driver, bwi_devclass, 0, 0);
235 MODULE_DEPEND(bwi, wlan, 1, 1, 1);
237 MODULE_DEPEND(bwi, wlan_ratectl_onoe, 1, 1, 1);
238 MODULE_DEPEND(bwi, wlan_ratectl_amrr, 1, 1, 1);
240 MODULE_DEPEND(bwi, pci, 1, 1, 1);
241 MODULE_DEPEND(bwi, cardbus, 1, 1, 1);
243 static const struct {
247 } bwi_bbpid_map[] = {
248 { 0x4301, 0x4301, 0x4301 },
249 { 0x4305, 0x4307, 0x4307 },
250 { 0x4403, 0x4403, 0x4402 },
251 { 0x4610, 0x4615, 0x4610 },
252 { 0x4710, 0x4715, 0x4710 },
253 { 0x4720, 0x4725, 0x4309 }
256 static const struct {
259 } bwi_regwin_count[] = {
272 #define CLKSRC(src) \
273 [BWI_CLKSRC_ ## src] = { \
274 .freq_min = BWI_CLKSRC_ ##src## _FMIN, \
275 .freq_max = BWI_CLKSRC_ ##src## _FMAX \
278 static const struct {
281 } bwi_clkfreq[BWI_CLKSRC_MAX] = {
289 static const uint8_t bwi_zero_addr[IEEE80211_ADDR_LEN];
291 static const struct ieee80211_rateset bwi_rateset_11b =
292 { 4, { 2, 4, 11, 22 } };
293 static const struct ieee80211_rateset bwi_rateset_11g =
294 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
/*
 * Read one 16-bit word from the SPROM.  'ofs' is relative to
 * BWI_SPROM_START within the currently mapped register window.
 */
297 bwi_read_sprom(struct bwi_softc *sc, uint16_t ofs)
299 	return CSR_READ_2(sc, ofs + BWI_SPROM_START);
/*
 * Fill in one 32-bit DMA descriptor at 'desc_idx' of 'desc_array'.
 * The buffer physical address 'paddr' is split into a low part
 * (descriptor address field) and a high part (carried in the control
 * word's ADDRHI field).  The last descriptor of the ring gets the
 * end-of-ring (EOR) bit.  Fields are stored little-endian.
 */
303 bwi_setup_desc32(struct bwi_softc *sc, struct bwi_desc32 *desc_array,
304 int ndesc, int desc_idx, bus_addr_t paddr, int buf_len,
307 struct bwi_desc32 *desc = &desc_array[desc_idx];
308 uint32_t ctrl, addr, addr_hi, addr_lo;
/* Split the physical address into the descriptor's low/high parts */
310 addr_lo = __SHIFTOUT(paddr, BWI_DESC32_A_ADDR_MASK);
311 addr_hi = __SHIFTOUT(paddr, BWI_DESC32_A_FUNC_MASK);
313 addr = __SHIFTIN(addr_lo, BWI_DESC32_A_ADDR_MASK) |
314 __SHIFTIN(BWI_DESC32_A_FUNC_TXRX, BWI_DESC32_A_FUNC_MASK);
316 ctrl = __SHIFTIN(buf_len, BWI_DESC32_C_BUFLEN_MASK) |
317 __SHIFTIN(addr_hi, BWI_DESC32_C_ADDRHI_MASK);
/* Mark end-of-ring on the last descriptor */
318 if (desc_idx == ndesc - 1)
319 ctrl |= BWI_DESC32_C_EOR;
322 ctrl |= BWI_DESC32_C_FRAME_START |
323 BWI_DESC32_C_FRAME_END |
/* Descriptor words are little-endian on the hardware side */
327 desc->addr = htole32(addr);
328 desc->ctrl = htole32(ctrl);
/*
 * Device probe: match the PCI vendor/device id pair against the
 * bwi_devices table and set the device description on a hit.
 */
332 bwi_probe(device_t dev)
334 const struct bwi_dev *b;
337 did = pci_get_device(dev);
338 vid = pci_get_vendor(dev);
340 for (b = bwi_devices; b->desc != NULL; ++b) {
341 if (b->did == did && b->vid == vid) {
342 device_set_desc(dev, b->desc);
/*
 * Device attach: bring the chip to D0, map its register BAR and IRQ,
 * attach the BBP and bus register windows, late-attach each MAC,
 * allocate DMA resources, then set up the ifnet/ieee80211com layers
 * (ratesets, channels, MAC address, callbacks) and hook the interrupt.
 */
350 bwi_attach(device_t dev)
352 struct bwi_softc *sc = device_get_softc(dev);
353 struct ieee80211com *ic = &sc->sc_ic;
354 struct ifnet *ifp = &ic->ic_if;
359 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
362 callout_init(&sc->sc_scan_ch);
363 callout_init(&sc->sc_calib_ch);
/* If the chip powered up in a low-power state, restore D0 and the
 * config registers that a power transition can clobber. */
366 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
369 /* XXX Save more PCIR */
370 irq = pci_read_config(dev, PCIR_INTLINE, 4);
371 mem = pci_read_config(dev, BWI_PCIR_BAR, 4);
373 device_printf(dev, "chip is in D%d power mode "
374 "-- setting to D0\n", pci_get_powerstate(dev));
376 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
378 pci_write_config(dev, PCIR_INTLINE, irq, 4);
/* BUG(review): this restores 'irq' into the BAR; the value saved
 * above into 'mem' was clearly intended here — confirm and fix. */
379 pci_write_config(dev, BWI_PCIR_BAR, irq, 4);
381 #endif /* !BURN_BRIDGE */
383 pci_enable_busmaster(dev);
385 /* Get more PCI information */
386 sc->sc_pci_revid = pci_get_revid(dev);
387 sc->sc_pci_subvid = pci_get_subvendor(dev);
388 sc->sc_pci_subdid = pci_get_subdevice(dev);
/* Map the memory BAR used for all CSR_* register access */
393 sc->sc_mem_rid = BWI_PCIR_BAR;
394 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
395 &sc->sc_mem_rid, RF_ACTIVE);
396 if (sc->sc_mem_res == NULL) {
397 device_printf(dev, "can't allocate IO memory\n");
400 sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
401 sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);
407 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
409 RF_SHAREABLE | RF_ACTIVE);
410 if (sc->sc_irq_res == NULL) {
411 device_printf(dev, "can't allocate irq\n");
/* Probe register windows / BBP, then power the BBP up for setup */
418 error = bwi_bbp_attach(sc);
422 error = bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST);
426 if (BWI_REGWIN_EXIST(&sc->sc_com_regwin)) {
427 error = bwi_set_clock_delay(sc);
431 error = bwi_set_clock_mode(sc, BWI_CLOCK_MODE_FAST);
435 error = bwi_get_pwron_delay(sc);
440 error = bwi_bus_attach(sc);
444 bwi_get_card_flags(sc);
/* Late-attach every discovered MAC, restoring the previous regwin
 * after each one. */
448 for (i = 0; i < sc->sc_nmac; ++i) {
449 struct bwi_regwin *old;
451 mac = &sc->sc_mac[i];
452 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old);
456 error = bwi_mac_lateattach(mac);
460 error = bwi_regwin_switch(sc, old, NULL);
466 * XXX First MAC is known to exist
469 mac = &sc->sc_mac[0];
472 bwi_bbp_power_off(sc);
474 error = bwi_dma_alloc(sc);
/* ifnet setup: driver entry points and send queue */
479 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
480 ifp->if_init = bwi_init;
481 ifp->if_ioctl = bwi_ioctl;
482 ifp->if_start = bwi_start;
483 ifp->if_watchdog = bwi_watchdog;
484 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
485 ifq_set_ready(&ifp->if_snd);
488 sc->sc_locale = __SHIFTOUT(bwi_read_sprom(sc, BWI_SPROM_CARD_INFO),
489 BWI_SPROM_CARD_INFO_LOCALE);
490 DPRINTF(sc, "locale: %d\n", sc->sc_locale);
493 * Setup ratesets, phytype, channels and get MAC address
495 if (phy->phy_mode == IEEE80211_MODE_11B ||
496 phy->phy_mode == IEEE80211_MODE_11G) {
499 ic->ic_sup_rates[IEEE80211_MODE_11B] = bwi_rateset_11b;
501 if (phy->phy_mode == IEEE80211_MODE_11B) {
502 chan_flags = IEEE80211_CHAN_B;
503 ic->ic_phytype = IEEE80211_T_DS;
505 chan_flags = IEEE80211_CHAN_CCK |
506 IEEE80211_CHAN_OFDM |
509 ic->ic_phytype = IEEE80211_T_OFDM;
510 ic->ic_sup_rates[IEEE80211_MODE_11G] =
514 /* XXX depend on locale */
515 for (i = 1; i <= 14; ++i) {
516 ic->ic_channels[i].ic_freq =
517 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
518 ic->ic_channels[i].ic_flags = chan_flags;
/* Try the 11BG SPROM slot first; a multicast address means invalid,
 * so fall back to the 11A slot before giving up. */
521 bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, ic->ic_myaddr);
522 if (IEEE80211_IS_MULTICAST(ic->ic_myaddr)) {
523 bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, ic->ic_myaddr);
524 if (IEEE80211_IS_MULTICAST(ic->ic_myaddr)) {
525 device_printf(dev, "invalid MAC address: "
526 "%6D\n", ic->ic_myaddr, ":");
529 } else if (phy->phy_mode == IEEE80211_MODE_11A) {
534 panic("unknown phymode %d\n", phy->phy_mode);
537 sc->sc_fw_version = BWI_FW_VERSION3;
538 sc->sc_dwell_time = 200;
540 ic->ic_caps = IEEE80211_C_SHSLOT |
541 IEEE80211_C_SHPREAMBLE |
544 ic->ic_state = IEEE80211_S_INIT;
545 ic->ic_opmode = IEEE80211_M_STA;
547 ic->ic_updateslot = bwi_updateslot;
549 ieee80211_ifattach(ic);
551 ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
552 ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
/* Interpose our state machine handler, keeping the original */
554 sc->sc_newstate = ic->ic_newstate;
555 ic->ic_newstate = bwi_newstate;
557 ieee80211_media_init(ic, bwi_media_change, ieee80211_media_status);
559 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, bwi_intr, sc,
560 &sc->sc_irq_handle, ifp->if_serializer);
562 ieee80211_ifdetach(ic);
567 ieee80211_announce(ic);
/*
 * Device detach: stop the interface and tear down the interrupt under
 * the ifnet serializer, detach the 802.11 layer and each MAC, then
 * release the IRQ and memory BAR resources.
 */
576 bwi_detach(device_t dev)
578 struct bwi_softc *sc = device_get_softc(dev);
580 if (device_is_attached(dev)) {
581 struct ifnet *ifp = &sc->sc_ic.ic_if;
584 lwkt_serialize_enter(ifp->if_serializer);
586 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
587 lwkt_serialize_exit(ifp->if_serializer);
589 ieee80211_ifdetach(&sc->sc_ic);
591 for (i = 0; i < sc->sc_nmac; ++i)
592 bwi_mac_detach(&sc->sc_mac[i]);
595 if (sc->sc_irq_res != NULL) {
596 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
600 if (sc->sc_mem_res != NULL) {
601 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
/*
 * System shutdown hook: quiesce the device under the ifnet serializer.
 */
611 bwi_shutdown(device_t dev)
613 struct bwi_softc *sc = device_get_softc(dev);
614 struct ifnet *ifp = &sc->sc_ic.ic_if;
616 lwkt_serialize_enter(ifp->if_serializer);
618 lwkt_serialize_exit(ifp->if_serializer);
/*
 * Power the chip on via the PCI GPIO config registers.  If the power
 * GPIO already reads as on, nothing is done.  The PLL is first forced
 * off, then re-enabled when 'with_pll' requests it.  Finally any
 * "Signaled Target Abort" left in PCI status is cleared.
 */
623 bwi_power_on(struct bwi_softc *sc, int with_pll)
625 uint32_t gpio_in, gpio_out, gpio_en;
628 gpio_in = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4);
/* Already powered on — nothing to do */
629 if (gpio_in & BWI_PCIM_GPIO_PWR_ON)
632 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
633 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4);
635 gpio_out |= BWI_PCIM_GPIO_PWR_ON;
636 gpio_en |= BWI_PCIM_GPIO_PWR_ON;
638 /* Turn off PLL first */
639 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF;
640 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF;
643 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
644 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4);
/* Now bring the PLL back up (when requested by 'with_pll') */
649 gpio_out &= ~BWI_PCIM_GPIO_PLL_PWR_OFF;
650 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
655 /* Clear "Signaled Target Abort" */
656 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
657 status &= ~PCIM_STATUS_STABORT;
658 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
/*
 * Power the chip off via the PCI GPIO config registers: deassert the
 * power-on GPIO and (per 'with_pll') assert PLL power-off.
 */
662 bwi_power_off(struct bwi_softc *sc, int with_pll)
664 uint32_t gpio_out, gpio_en;
666 pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); /* dummy read */
667 gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
668 gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4);
670 gpio_out &= ~BWI_PCIM_GPIO_PWR_ON;
671 gpio_en |= BWI_PCIM_GPIO_PWR_ON;
673 gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF;
674 gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF;
677 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
678 pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4);
/*
 * Switch the shared register mapping to regwin 'rw'.  If 'old_rw' is
 * non-NULL, the previously current regwin is returned through it so
 * the caller can restore it later.  No-op when 'rw' is already
 * current or does not exist.
 */
683 bwi_regwin_switch(struct bwi_softc *sc, struct bwi_regwin *rw,
684 struct bwi_regwin **old_rw)
691 if (!BWI_REGWIN_EXIST(rw))
694 if (sc->sc_cur_regwin != rw) {
695 error = bwi_regwin_select(sc, rw->rw_id);
697 if_printf(&sc->sc_ic.ic_if, "can't select regwin %d\n",
704 *old_rw = sc->sc_cur_regwin;
705 sc->sc_cur_regwin = rw;
/*
 * Program the regwin-select PCI config register to map regwin 'id',
 * retrying up to RETRY_MAX times and reading back to confirm the
 * selection took effect.
 */
710 bwi_regwin_select(struct bwi_softc *sc, int id)
712 uint32_t win = BWI_PCIM_REGWIN(id);
716 for (i = 0; i < RETRY_MAX; ++i) {
717 pci_write_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, win, 4);
718 if (pci_read_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, 4) == win)
/*
 * Decode the currently mapped regwin's type and revision from the
 * BWI_ID_HI register into *type and *rev.
 */
728 bwi_regwin_info(struct bwi_softc *sc, uint16_t *type, uint8_t *rev)
732 val = CSR_READ_4(sc, BWI_ID_HI);
733 *type = BWI_ID_HI_REGWIN_TYPE(val);
734 *rev = BWI_ID_HI_REGWIN_REV(val);
736 DPRINTF(sc, "regwin: type 0x%03x, rev %d, vendor 0x%04x\n",
737 *type, *rev, __SHIFTOUT(val, BWI_ID_HI_REGWIN_VENDOR_MASK));
/*
 * Discover the BBP (baseband processor) and enumerate register
 * windows.  Regwin 0 is selected first; if it is a "common" regwin
 * the BBP id/capabilities come from its registers, otherwise the BBP
 * id is derived from the PCI device id via bwi_bbpid_map.  The number
 * of regwins is then determined (from BWI_INFO or bwi_regwin_count),
 * each remaining regwin is probed, and bus/MAC regwins are recorded.
 * Finishes by switching to the first MAC's regwin.
 */
741 bwi_bbp_attach(struct bwi_softc *sc)
743 #define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
744 uint16_t bbp_id, rw_type;
747 int error, nregwin, i;
750 * Get 0th regwin information
751 * NOTE: 0th regwin should exist
753 error = bwi_regwin_select(sc, 0);
755 device_printf(sc->sc_dev, "can't select regwin 0\n");
758 bwi_regwin_info(sc, &rw_type, &rw_rev);
/* Common regwin present: read BBP id/capabilities from registers */
765 if (rw_type == BWI_REGWIN_T_COM) {
766 info = CSR_READ_4(sc, BWI_INFO);
767 bbp_id = __SHIFTOUT(info, BWI_INFO_BBPID_MASK);
769 BWI_CREATE_REGWIN(&sc->sc_com_regwin, 0, rw_type, rw_rev);
771 sc->sc_cap = CSR_READ_4(sc, BWI_CAPABILITY);
/* No common regwin: map the PCI device id to a BBP id */
773 uint16_t did = pci_get_device(sc->sc_dev);
774 uint8_t revid = pci_get_revid(sc->sc_dev);
776 for (i = 0; i < N(bwi_bbpid_map); ++i) {
777 if (did >= bwi_bbpid_map[i].did_min &&
778 did <= bwi_bbpid_map[i].did_max) {
779 bbp_id = bwi_bbpid_map[i].bbp_id;
784 device_printf(sc->sc_dev, "no BBP id for device id "
789 info = __SHIFTIN(revid, BWI_INFO_BBPREV_MASK) |
790 __SHIFTIN(0, BWI_INFO_BBPPKG_MASK);
794 * Find out number of regwins
797 if (rw_type == BWI_REGWIN_T_COM && rw_rev >= 4) {
798 nregwin = __SHIFTOUT(info, BWI_INFO_NREGWIN_MASK);
800 for (i = 0; i < N(bwi_regwin_count); ++i) {
801 if (bwi_regwin_count[i].bbp_id == bbp_id) {
802 nregwin = bwi_regwin_count[i].nregwin;
807 device_printf(sc->sc_dev, "no number of win for "
808 "BBP id 0x%04x\n", bbp_id);
813 /* Record BBP id/rev for later using */
814 sc->sc_bbp_id = bbp_id;
815 sc->sc_bbp_rev = __SHIFTOUT(info, BWI_INFO_BBPREV_MASK);
816 sc->sc_bbp_pkg = __SHIFTOUT(info, BWI_INFO_BBPPKG_MASK);
817 device_printf(sc->sc_dev, "BBP id 0x%04x, BBP rev 0x%x, BBP pkg %d\n",
818 sc->sc_bbp_id, sc->sc_bbp_rev, sc->sc_bbp_pkg);
820 DPRINTF(sc, "nregwin %d, cap 0x%08x\n", nregwin, sc->sc_cap);
823 * Create rest of the regwins
826 /* Don't re-create common regwin, if it is already created */
827 i = BWI_REGWIN_EXIST(&sc->sc_com_regwin) ? 1 : 0;
829 for (; i < nregwin; ++i) {
831 * Get regwin information
833 error = bwi_regwin_select(sc, i);
835 device_printf(sc->sc_dev,
836 "can't select regwin %d\n", i);
839 bwi_regwin_info(sc, &rw_type, &rw_rev);
843 * 1) Bus (PCI/PCIE) regwin
845 * Ignore rest types of regwin
847 if (rw_type == BWI_REGWIN_T_BUSPCI ||
848 rw_type == BWI_REGWIN_T_BUSPCIE) {
849 if (BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) {
850 device_printf(sc->sc_dev,
851 "bus regwin already exists\n");
853 BWI_CREATE_REGWIN(&sc->sc_bus_regwin, i,
856 } else if (rw_type == BWI_REGWIN_T_MAC) {
857 /* XXX ignore return value */
858 bwi_mac_attach(sc, i, rw_rev);
862 /* At least one MAC should exist */
863 if (!BWI_REGWIN_EXIST(&sc->sc_mac[0].mac_regwin)) {
864 device_printf(sc->sc_dev, "no MAC was found\n");
867 KKASSERT(sc->sc_nmac > 0);
869 /* Bus regwin must exist */
870 if (!BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) {
871 device_printf(sc->sc_dev, "no bus regwin was found\n");
875 /* Start with first MAC */
876 error = bwi_regwin_switch(sc, &sc->sc_mac[0].mac_regwin, NULL);
/*
 * One-time bus regwin initialization for 'mac' (caller must have the
 * MAC regwin current).  Routes this MAC's interrupts through the bus
 * (via BWI_INTRVEC on old PCI revs, or the INTCTL config register
 * otherwise), then — once only, guarded by BWI_F_BUS_INITED — enables
 * prefetch/burst and configures bus timeouts as the bus revision
 * requires.  Returns after switching back to the previous regwin.
 */
885 bwi_bus_init(struct bwi_softc *sc, struct bwi_mac *mac)
887 struct bwi_regwin *old, *bus;
891 bus = &sc->sc_bus_regwin;
892 KKASSERT(sc->sc_cur_regwin == &mac->mac_regwin);
895 * Tell bus to generate requested interrupts
897 if (bus->rw_rev < 6 && bus->rw_type == BWI_REGWIN_T_BUSPCI) {
899 * NOTE: Read BWI_FLAGS from MAC regwin
901 val = CSR_READ_4(sc, BWI_FLAGS);
903 error = bwi_regwin_switch(sc, bus, &old);
907 CSR_SETBITS_4(sc, BWI_INTRVEC, (val & BWI_FLAGS_INTR_MASK));
/* Newer bus: enable this MAC's interrupt line via PCI config space */
911 mac_mask = 1 << mac->mac_id;
913 error = bwi_regwin_switch(sc, bus, &old);
917 val = pci_read_config(sc->sc_dev, BWI_PCIR_INTCTL, 4);
918 val |= mac_mask << 8;
919 pci_write_config(sc->sc_dev, BWI_PCIR_INTCTL, val, 4);
/* The remainder only needs to run once per attach */
922 if (sc->sc_flags & BWI_F_BUS_INITED)
925 if (bus->rw_type == BWI_REGWIN_T_BUSPCI) {
927 * Enable prefetch and burst
929 CSR_SETBITS_4(sc, BWI_BUS_CONFIG,
930 BWI_BUS_CONFIG_PREFETCH | BWI_BUS_CONFIG_BURST);
932 if (bus->rw_rev < 5) {
933 struct bwi_regwin *com = &sc->sc_com_regwin;
936 * Configure timeouts for bus operation
940 * Set service timeout and request timeout
942 CSR_SETBITS_4(sc, BWI_CONF_LO,
943 __SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) |
944 __SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK));
947 * If there is common regwin, we switch to that regwin
948 * and switch back to bus regwin once we have done.
950 if (BWI_REGWIN_EXIST(com)) {
951 error = bwi_regwin_switch(sc, com, NULL);
956 /* Let bus know what we have changed */
957 CSR_WRITE_4(sc, BWI_BUS_ADDR, BWI_BUS_ADDR_MAGIC);
958 CSR_READ_4(sc, BWI_BUS_ADDR); /* Flush */
959 CSR_WRITE_4(sc, BWI_BUS_DATA, 0);
960 CSR_READ_4(sc, BWI_BUS_DATA); /* Flush */
962 if (BWI_REGWIN_EXIST(com)) {
963 error = bwi_regwin_switch(sc, bus, NULL);
967 } else if (bus->rw_rev >= 11) {
969 * Enable memory read multiple
971 CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_MRM);
977 sc->sc_flags |= BWI_F_BUS_INITED;
979 return bwi_regwin_switch(sc, old, NULL);
/*
 * Read card flags from SPROM; an all-ones value means an unprogrammed
 * SPROM and is treated as 0.  Certain Apple subsystem ids additionally
 * force the PA-on-GPIO9 quirk flag.
 */
983 bwi_get_card_flags(struct bwi_softc *sc)
985 sc->sc_card_flags = bwi_read_sprom(sc, BWI_SPROM_CARD_FLAGS);
986 if (sc->sc_card_flags == 0xffff)
987 sc->sc_card_flags = 0;
989 if (sc->sc_pci_subvid == PCI_VENDOR_APPLE &&
990 sc->sc_pci_subdid == 0x4e && /* XXX */
991 sc->sc_pci_revid > 0x40)
992 sc->sc_card_flags |= BWI_CARD_F_PA_GPIO9;
994 DPRINTF(sc, "card flags 0x%04x\n", sc->sc_card_flags);
/*
 * Read a 6-byte ethernet address from SPROM at 'eaddr_ofs' into
 * 'eaddr', as three 16-bit big-endian words.
 */
998 bwi_get_eaddr(struct bwi_softc *sc, uint16_t eaddr_ofs, uint8_t *eaddr)
1002 for (i = 0; i < 3; ++i) {
1003 *((uint16_t *)eaddr + i) =
1004 htobe16(bwi_read_sprom(sc, eaddr_ofs + 2 * i));
/*
 * Determine the slow-clock source and divider for the common regwin
 * (which must be current and clock-mode capable) and report the
 * resulting min/max frequencies in '*freq'.  The source/divider
 * encoding depends on the common regwin revision (<6, <10, >=10).
 */
1009 bwi_get_clock_freq(struct bwi_softc *sc, struct bwi_clock_freq *freq)
1011 struct bwi_regwin *com;
1016 bzero(freq, sizeof(*freq));
1017 com = &sc->sc_com_regwin;
1019 KKASSERT(BWI_REGWIN_EXIST(com));
1020 KKASSERT(sc->sc_cur_regwin == com);
1021 KKASSERT(sc->sc_cap & BWI_CAP_CLKMODE);
1024 * Calculate clock frequency
/* rev < 6: clock source indicated by a GPIO config bit */
1028 if (com->rw_rev < 6) {
1029 val = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
1030 if (val & BWI_PCIM_GPIO_OUT_CLKSRC) {
1031 src = BWI_CLKSRC_PCI;
1034 src = BWI_CLKSRC_CS_OSC;
/* 6 <= rev < 10: source and divider come from BWI_CLOCK_CTRL */
1037 } else if (com->rw_rev < 10) {
1038 val = CSR_READ_4(sc, BWI_CLOCK_CTRL);
1040 src = __SHIFTOUT(val, BWI_CLOCK_CTRL_CLKSRC);
1041 if (src == BWI_CLKSRC_LP_OSC) {
1044 div = (__SHIFTOUT(val, BWI_CLOCK_CTRL_FDIV) + 1) << 2;
1046 /* Unknown source */
1047 if (src >= BWI_CLKSRC_MAX)
1048 src = BWI_CLKSRC_CS_OSC;
/* rev >= 10: crystal oscillator, divider from BWI_CLOCK_INFO */
1051 val = CSR_READ_4(sc, BWI_CLOCK_INFO);
1053 src = BWI_CLKSRC_CS_OSC;
1054 div = (__SHIFTOUT(val, BWI_CLOCK_INFO_FDIV) + 1) << 2;
1057 KKASSERT(src >= 0 && src < BWI_CLKSRC_MAX);
1060 DPRINTF(sc, "clksrc %s\n",
1061 src == BWI_CLKSRC_PCI ? "PCI" :
1062 (src == BWI_CLKSRC_LP_OSC ? "LP_OSC" : "CS_OSC"));
1064 freq->clkfreq_min = bwi_clkfreq[src].freq_min / div;
1065 freq->clkfreq_max = bwi_clkfreq[src].freq_max / div;
1067 DPRINTF(sc, "clkfreq min %u, max %u\n",
1068 freq->clkfreq_min, freq->clkfreq_max);
/*
 * Set the chip clock mode (FAST/SLOW/DYN) through the common regwin's
 * BWI_CLOCK_CTRL register.  Only applies to common regwin revs in
 * [6, 10) on clock-mode-capable chips; otherwise a no-op.  FAST mode
 * powers the chip on first (without PLL); afterwards power is dropped
 * again when 'pwr_off' was set.  Restores the previous regwin.
 */
1072 bwi_set_clock_mode(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
1074 struct bwi_regwin *old, *com;
1075 uint32_t clk_ctrl, clk_src;
1076 int error, pwr_off = 0;
1078 com = &sc->sc_com_regwin;
1079 if (!BWI_REGWIN_EXIST(com))
1082 if (com->rw_rev >= 10 || com->rw_rev < 6)
1086 * For common regwin whose rev is [6, 10), the chip
1087 * must be capable to change clock mode.
1089 if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
1092 error = bwi_regwin_switch(sc, com, &old)
1096 if (clk_mode == BWI_CLOCK_MODE_FAST)
1097 bwi_power_on(sc, 0); /* Don't turn on PLL */
1099 clk_ctrl = CSR_READ_4(sc, BWI_CLOCK_CTRL);
1100 clk_src = __SHIFTOUT(clk_ctrl, BWI_CLOCK_CTRL_CLKSRC);
1103 case BWI_CLOCK_MODE_FAST:
1104 clk_ctrl &= ~BWI_CLOCK_CTRL_SLOW;
1105 clk_ctrl |= BWI_CLOCK_CTRL_IGNPLL;
1107 case BWI_CLOCK_MODE_SLOW:
1108 clk_ctrl |= BWI_CLOCK_CTRL_SLOW;
1110 case BWI_CLOCK_MODE_DYN:
1111 clk_ctrl &= ~(BWI_CLOCK_CTRL_SLOW |
1112 BWI_CLOCK_CTRL_IGNPLL |
1113 BWI_CLOCK_CTRL_NODYN);
/* Dynamic mode requires the crystal oscillator as clock source */
1114 if (clk_src != BWI_CLKSRC_CS_OSC) {
1115 clk_ctrl |= BWI_CLOCK_CTRL_NODYN;
1120 CSR_WRITE_4(sc, BWI_CLOCK_CTRL, clk_ctrl);
1123 bwi_power_off(sc, 0); /* Leave PLL as it is */
1125 return bwi_regwin_switch(sc, old, NULL);
/*
 * Program clock-related delays in the common regwin: BCM4321 magic
 * control values for early BBP revs, and (on clock-mode-capable
 * chips) the PLL-on and frequency-select delays derived from the
 * maximum slow-clock frequency.  Restores the previous regwin.
 */
1129 bwi_set_clock_delay(struct bwi_softc *sc)
1131 struct bwi_regwin *old, *com;
1134 com = &sc->sc_com_regwin;
1135 if (!BWI_REGWIN_EXIST(com))
1138 error = bwi_regwin_switch(sc, com, &old);
1142 if (sc->sc_bbp_id == BWI_BBPID_BCM4321) {
1143 if (sc->sc_bbp_rev == 0)
1144 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC0);
1145 else if (sc->sc_bbp_rev == 1)
1146 CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC1);
1149 if (sc->sc_cap & BWI_CAP_CLKMODE) {
1150 if (com->rw_rev >= 10) {
1151 CSR_FILT_SETBITS_4(sc, BWI_CLOCK_INFO, 0xffff, 0x40000);
1153 struct bwi_clock_freq freq;
1155 bwi_get_clock_freq(sc, &freq);
/* Delays scale with slow-clock frequency: 150us and 15us worth */
1156 CSR_WRITE_4(sc, BWI_PLL_ON_DELAY,
1157 howmany(freq.clkfreq_max * 150, 1000000));
1158 CSR_WRITE_4(sc, BWI_FREQ_SEL_DELAY,
1159 howmany(freq.clkfreq_max * 15, 1000000));
1163 return bwi_regwin_switch(sc, old, NULL);
/*
 * Interface init (if_init body): stop the interface, power the BBP up
 * fast, initialize the first MAC, set address/BSSID filters, drain
 * stale TX status, start the MAC, enable interrupts, mark the ifnet
 * RUNNING and kick the 802.11 state machine.  Runs under the ifnet
 * serializer.
 */
1169 struct bwi_softc *sc = xsc;
1170 struct ieee80211com *ic = &sc->sc_ic;
1171 struct ifnet *ifp = &ic->ic_if;
1172 struct bwi_mac *mac;
1175 ASSERT_SERIALIZED(ifp->if_serializer);
1177 DPRINTF(sc, "%s\n", __func__);
1179 error = bwi_stop(sc);
1181 if_printf(ifp, "can't stop\n");
1185 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST);
1189 mac = &sc->sc_mac[0];
1190 error = bwi_regwin_switch(sc, &mac->mac_regwin, NULL);
1194 error = bwi_mac_init(mac);
/* Drop back to dynamic clocking once init is complete */
1198 bwi_bbp_power_on(sc, BWI_CLOCK_MODE_DYN);
1200 bcopy(IF_LLADDR(ifp), ic->ic_myaddr, sizeof(ic->ic_myaddr));
1202 bwi_set_bssid(sc, bwi_zero_addr); /* Clear BSSID */
1203 bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, ic->ic_myaddr);
1205 bwi_mac_reset_hwkeys(mac);
1207 if ((mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) == 0) {
1212 * Drain any possible pending TX status
1214 for (i = 0; i < NRETRY; ++i) {
1215 if ((CSR_READ_4(sc, BWI_TXSTATUS_0) &
1216 BWI_TXSTATUS_0_MORE) == 0)
1218 CSR_READ_4(sc, BWI_TXSTATUS_1);
1221 if_printf(ifp, "can't drain TX status\n");
1225 if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G)
1226 bwi_mac_updateslot(mac, 1);
1229 error = bwi_mac_start(mac);
1234 bwi_enable_intrs(sc, BWI_INIT_INTRS);
1236 ifp->if_flags |= IFF_RUNNING;
1237 ifp->if_flags &= ~IFF_OACTIVE;
/* Kick the 802.11 layer: auto-scan unless monitor/manual roaming */
1239 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
1240 if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
1241 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
1243 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
/*
 * ioctl handler (runs under the ifnet serializer).  Handles promiscuous
 * mode transitions and IFF_UP/RUNNING changes itself; everything else
 * is passed to ieee80211_ioctl().  ENETRESET from the 802.11 layer
 * triggers a re-init when the interface is up and running.
 */
1251 bwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t req, struct ucred *cr)
1253 struct bwi_softc *sc = ifp->if_softc;
1256 ASSERT_SERIALIZED(ifp->if_serializer);
1260 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1261 (IFF_UP | IFF_RUNNING)) {
1262 struct bwi_mac *mac;
1265 KKASSERT(sc->sc_cur_regwin->rw_type ==
1267 mac = (struct bwi_mac *)sc->sc_cur_regwin;
/* Track promiscuous-mode transitions in sc_flags and push the
 * change down to the MAC. */
1269 if ((ifp->if_flags & IFF_PROMISC) &&
1270 (sc->sc_flags & BWI_F_PROMISC) == 0) {
1272 sc->sc_flags |= BWI_F_PROMISC;
1273 } else if ((ifp->if_flags & IFF_PROMISC) == 0 &&
1274 (sc->sc_flags & BWI_F_PROMISC)) {
1276 sc->sc_flags &= ~BWI_F_PROMISC;
1280 bwi_mac_set_promisc(mac, promisc);
1283 if (ifp->if_flags & IFF_UP) {
1284 if ((ifp->if_flags & IFF_RUNNING) == 0)
1287 if (ifp->if_flags & IFF_RUNNING)
1292 error = ieee80211_ioctl(&sc->sc_ic, cmd, req, cr);
1296 if (error == ENETRESET) {
1297 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1298 (IFF_UP | IFF_RUNNING))
/*
 * Transmit start (if_start body, under the ifnet serializer).  Drains
 * frames into free slots of the data TX ring: management frames from
 * ic_mgtq take priority, then data frames from the send queue (only
 * in RUN state).  Data frames are encapsulated, optionally encrypted,
 * and handed to bwi_encap().  Sets IFF_OACTIVE when the ring is close
 * to full and arms the 5-second TX watchdog when anything was queued.
 */
1306 bwi_start(struct ifnet *ifp)
1308 struct bwi_softc *sc = ifp->if_softc;
1309 struct ieee80211com *ic = &sc->sc_ic;
1310 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
1313 ASSERT_SERIALIZED(ifp->if_serializer);
1315 if ((ifp->if_flags & IFF_OACTIVE) ||
1316 (ifp->if_flags & IFF_RUNNING) == 0)
/* Loop while the next ring slot is free */
1322 while (tbd->tbd_buf[idx].tb_mbuf == NULL) {
1323 struct ieee80211_frame *wh;
1324 struct ieee80211_node *ni;
/* Management frames first; node reference rides in rcvif */
1328 if (!IF_QEMPTY(&ic->ic_mgtq)) {
1329 IF_DEQUEUE(&ic->ic_mgtq, m);
1331 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
1332 m->m_pkthdr.rcvif = NULL;
1335 } else if (!ifq_is_empty(&ifp->if_snd)) {
1336 struct ether_header *eh;
1338 if (ic->ic_state != IEEE80211_S_RUN)
1341 m = ifq_dequeue(&ifp->if_snd, NULL);
1345 if (m->m_len < sizeof(*eh)) {
1346 m = m_pullup(m, sizeof(*eh));
1352 eh = mtod(m, struct ether_header *);
1354 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
1365 m = ieee80211_encap(ic, m, ni);
1367 ieee80211_free_node(ni);
1375 if (ic->ic_rawbpf != NULL)
1376 bpf_mtap(ic->ic_rawbpf, m);
1378 wh = mtod(m, struct ieee80211_frame *);
/* Encrypt in place when the WEP/privacy bit is set */
1379 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1380 if (ieee80211_crypto_encap(ic, ni, m) == NULL) {
1381 ieee80211_free_node(ni);
1387 wh = NULL; /* Catch any invalid use */
1390 ieee80211_free_node(ni);
1394 if (bwi_encap(sc, idx, m, ni) != 0) {
1395 /* 'm' is freed in bwi_encap() if we reach here */
1397 ieee80211_free_node(ni);
1404 idx = (idx + 1) % BWI_TX_NDESC;
/* Keep spare descriptors in reserve; throttle the stack */
1406 if (tbd->tbd_used + BWI_TX_NSPRDESC >= BWI_TX_NDESC) {
1407 ifp->if_flags |= IFF_OACTIVE;
1414 sc->sc_tx_timer = 5;
/*
 * Interface watchdog: count down the TX timer armed by bwi_start()
 * and report a timeout when it expires; also drives the 802.11
 * layer's own watchdog.  Runs under the ifnet serializer.
 */
1419 bwi_watchdog(struct ifnet *ifp)
1421 struct bwi_softc *sc = ifp->if_softc;
1423 ASSERT_SERIALIZED(ifp->if_serializer);
1427 if ((ifp->if_flags & IFF_RUNNING) == 0)
1430 if (sc->sc_tx_timer) {
1431 if (--sc->sc_tx_timer == 0) {
1432 if_printf(ifp, "watchdog timeout\n");
1439 ieee80211_watchdog(&sc->sc_ic);
/*
 * Stop the interface: move the 802.11 state machine to INIT, disable
 * interrupts if running, shut down every initialized MAC (switching
 * regwins around each shutdown), power the BBP off, and clear the
 * TX timer and RUNNING/OACTIVE flags.  Runs under the serializer.
 */
1443 bwi_stop(struct bwi_softc *sc)
1445 struct ieee80211com *ic = &sc->sc_ic;
1446 struct ifnet *ifp = &ic->ic_if;
1447 struct bwi_mac *mac;
1448 int i, error, pwr_off = 0;
1450 ASSERT_SERIALIZED(ifp->if_serializer);
1452 DPRINTF(sc, "%s\n", __func__);
1454 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1456 if (ifp->if_flags & IFF_RUNNING) {
1457 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
1458 mac = (struct bwi_mac *)sc->sc_cur_regwin;
1460 bwi_disable_intrs(sc, BWI_ALL_INTRS);
/* Read back to flush the interrupt-mask write */
1461 CSR_READ_4(sc, BWI_MAC_INTR_MASK);
1465 for (i = 0; i < sc->sc_nmac; ++i) {
1466 struct bwi_regwin *old_rw;
1468 mac = &sc->sc_mac[i];
1469 if ((mac->mac_flags & BWI_MAC_F_INITED) == 0)
1472 error = bwi_regwin_switch(sc, &mac->mac_regwin, &old_rw);
1476 bwi_mac_shutdown(mac);
1479 bwi_regwin_switch(sc, old_rw, NULL);
1483 bwi_bbp_power_off(sc);
1485 sc->sc_tx_timer = 0;
1487 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
/*
 * Interrupt handler body (the function's definition line is elided from
 * this listing; presumably bwi_intr(void *xsc) -- TODO confirm).
 * Reads and masks the MAC interrupt status, acknowledges it, disables
 * all interrupts while servicing, dispatches RX / TX-status / TX-done
 * work, then re-enables the initial interrupt set.
 */
1494 struct bwi_softc *sc = xsc;
1495 struct ifnet *ifp = &sc->sc_ic.ic_if;
1496 uint32_t intr_status;
1497 uint32_t txrx_intr_status[BWI_TXRX_NRING];
1500 ASSERT_SERIALIZED(ifp->if_serializer);
1502 if ((ifp->if_flags & IFF_RUNNING) == 0)
1506 * Get interrupt status
1508 intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS);
/* All-ones means the device is gone / interrupt is not ours. */
1509 if (intr_status == 0xffffffff) /* Not for us */
1513 if_printf(ifp, "intr status 0x%08x\n", intr_status);
/* Only keep bits that are currently enabled in the mask. */
1516 intr_status &= CSR_READ_4(sc, BWI_MAC_INTR_MASK);
1517 if (intr_status == 0) /* Nothing is interesting */
1522 if_printf(ifp, "TX/RX intr");
/* Latch per-ring TX/RX interrupt status, masked by ring direction. */
1524 for (i = 0; i < BWI_TXRX_NRING; ++i) {
1527 if (BWI_TXRX_IS_RX(i))
1528 mask = BWI_TXRX_RX_INTRS;
1530 mask = BWI_TXRX_TX_INTRS;
1532 txrx_intr_status[i] =
1533 CSR_READ_4(sc, BWI_TXRX_INTR_STATUS(i)) & mask;
1536 kprintf(", %d 0x%08x", i, txrx_intr_status[i]);
1539 if (txrx_intr_status[i] & BWI_TXRX_INTR_ERROR) {
1540 if_printf(ifp, "intr fatal TX/RX (%d) error 0x%08x\n",
1541 i, txrx_intr_status[i]);
1550 * Acknowledge interrupt
1552 CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, intr_status);
1554 for (i = 0; i < BWI_TXRX_NRING; ++i)
1555 CSR_WRITE_4(sc, BWI_TXRX_INTR_STATUS(i), txrx_intr_status[i]);
1557 /* Disable all interrupts */
1558 bwi_disable_intrs(sc, BWI_ALL_INTRS);
1560 if (intr_status & BWI_INTR_PHY_TXERR)
1561 if_printf(ifp, "intr PHY TX error\n");
1564 /* TODO: reset device */
/* TBTT: reconfigure MAC power-save on beacon boundary. */
1567 if (intr_status & BWI_INTR_TBTT) {
1568 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
1569 bwi_mac_config_ps((struct bwi_mac *)sc->sc_cur_regwin);
1572 if (intr_status & BWI_INTR_EO_ATIM)
1573 if_printf(ifp, "EO_ATIM\n");
1575 if (intr_status & BWI_INTR_PMQ) {
1577 if ((CSR_READ_4(sc, BWI_MAC_PS_STATUS) & 0x8) == 0)
1580 CSR_WRITE_2(sc, BWI_MAC_PS_STATUS, 0x2);
1583 if (intr_status & BWI_INTR_NOISE)
1584 if_printf(ifp, "intr noise\n");
/* Ring 0 carries RX data; ring 3 carries TX status descriptors. */
1586 if (txrx_intr_status[0] & BWI_TXRX_INTR_RX)
1589 if (txrx_intr_status[3] & BWI_TXRX_INTR_RX)
1590 sc->sc_txeof_status(sc);
1592 if (intr_status & BWI_INTR_TX_DONE)
1597 /* Re-enable interrupts */
1598 bwi_enable_intrs(sc, BWI_INIT_INTRS);
/*
 * bwi_newstate: net80211 state-change hook.  Stops the scan/calibration
 * callouts, programs the channel and BSSID for the new state, chains to
 * the saved net80211 newstate handler, then restarts the appropriate
 * callout (channel dwell while scanning, periodic TX-power calibration
 * while running).  NOTE(review): listing is elided between lines.
 */
1602 bwi_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1604 struct bwi_softc *sc = ic->ic_if.if_softc;
1605 struct ifnet *ifp = &ic->ic_if;
1608 ASSERT_SERIALIZED(ifp->if_serializer);
1610 callout_stop(&sc->sc_scan_ch);
1611 callout_stop(&sc->sc_calib_ch);
1613 if (nstate == IEEE80211_S_INIT)
1616 error = bwi_set_chan(sc, ic->ic_curchan);
1618 if_printf(ifp, "can't set channel to %u\n",
1619 ieee80211_chan2ieee(ic, ic->ic_curchan));
1623 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1625 } else if (nstate == IEEE80211_S_RUN) {
1626 struct bwi_mac *mac;
1628 bwi_set_bssid(sc, ic->ic_bss->ni_bssid);
1630 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
1631 mac = (struct bwi_mac *)sc->sc_cur_regwin;
1633 /* Initial TX power calibration */
1634 bwi_mac_calibrate_txpower(mac);
/* Not associated: clear the BSSID filter. */
1636 bwi_set_bssid(sc, bwi_zero_addr);
/* Chain to the net80211 handler saved at attach time. */
1640 error = sc->sc_newstate(ic, nstate, arg);
1642 if (nstate == IEEE80211_S_SCAN) {
1643 callout_reset(&sc->sc_scan_ch,
1644 (sc->sc_dwell_time * hz) / 1000,
1646 } else if (nstate == IEEE80211_S_RUN) {
1647 /* XXX 15 seconds */
1648 callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc);
/*
 * bwi_media_change: ifmedia change callback.  Lets net80211 process the
 * media change; ENETRESET means the interface must be re-initialized,
 * which is done only if it is currently up and running.
 */
1654 bwi_media_change(struct ifnet *ifp)
1658 ASSERT_SERIALIZED(ifp->if_serializer);
1660 error = ieee80211_media_change(ifp);
1661 if (error != ENETRESET)
1664 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
1665 bwi_init(ifp->if_softc);
/*
 * bwi_dma_alloc: set up all DMA resources for the device.  Chooses the
 * 30/32-bit or 64-bit descriptor scheme from sc_bus_space (installing
 * the matching ring/descriptor/TXEOF function pointers), creates the
 * parent DMA tag, the TX/RX ring tags and rings, optional TX status
 * rings, and finally the per-mbuf DMA maps.
 * NOTE(review): listing is elided; error-unwind paths between the
 * numbered lines are not fully visible.
 */
1670 bwi_dma_alloc(struct bwi_softc *sc)
1672 int error, i, has_txstats;
1673 bus_addr_t lowaddr = 0;
1674 bus_size_t tx_ring_sz, rx_ring_sz, desc_sz = 0;
1675 uint32_t txrx_ctrl_step = 0;
/* TX status support exists if any MAC advertises it. */
1678 for (i = 0; i < sc->sc_nmac; ++i) {
1679 if (sc->sc_mac[i].mac_flags & BWI_MAC_F_HAS_TXSTATS) {
1685 switch (sc->sc_bus_space) {
1686 case BWI_BUS_SPACE_30BIT:
1687 case BWI_BUS_SPACE_32BIT:
1688 if (sc->sc_bus_space == BWI_BUS_SPACE_30BIT)
1689 lowaddr = BWI_BUS_SPACE_MAXADDR;
1691 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1692 desc_sz = sizeof(struct bwi_desc32);
/* 32-bit scheme: TX/RX control registers are 0x20 apart. */
1693 txrx_ctrl_step = 0x20;
1695 sc->sc_init_tx_ring = bwi_init_tx_ring32;
1696 sc->sc_free_tx_ring = bwi_free_tx_ring32;
1697 sc->sc_init_rx_ring = bwi_init_rx_ring32;
1698 sc->sc_free_rx_ring = bwi_free_rx_ring32;
1699 sc->sc_setup_rxdesc = bwi_setup_rx_desc32;
1700 sc->sc_setup_txdesc = bwi_setup_tx_desc32;
1701 sc->sc_rxeof = bwi_rxeof32;
1702 sc->sc_start_tx = bwi_start_tx32;
1704 sc->sc_init_txstats = bwi_init_txstats32;
1705 sc->sc_free_txstats = bwi_free_txstats32;
1706 sc->sc_txeof_status = bwi_txeof_status32;
1710 case BWI_BUS_SPACE_64BIT:
1711 lowaddr = BUS_SPACE_MAXADDR; /* XXX */
1712 desc_sz = sizeof(struct bwi_desc64);
/* 64-bit scheme: control registers are 0x40 apart. */
1713 txrx_ctrl_step = 0x40;
1715 sc->sc_init_tx_ring = bwi_init_tx_ring64;
1716 sc->sc_free_tx_ring = bwi_free_tx_ring64;
1717 sc->sc_init_rx_ring = bwi_init_rx_ring64;
1718 sc->sc_free_rx_ring = bwi_free_rx_ring64;
1719 sc->sc_setup_rxdesc = bwi_setup_rx_desc64;
1720 sc->sc_setup_txdesc = bwi_setup_tx_desc64;
1721 sc->sc_rxeof = bwi_rxeof64;
1722 sc->sc_start_tx = bwi_start_tx64;
1724 sc->sc_init_txstats = bwi_init_txstats64;
1725 sc->sc_free_txstats = bwi_free_txstats64;
1726 sc->sc_txeof_status = bwi_txeof_status64;
/* The switch above must have chosen a scheme. */
1731 KKASSERT(lowaddr != 0);
1732 KKASSERT(desc_sz != 0);
1733 KKASSERT(txrx_ctrl_step != 0);
1735 tx_ring_sz = roundup(desc_sz * BWI_TX_NDESC, BWI_RING_ALIGN);
1736 rx_ring_sz = roundup(desc_sz * BWI_RX_NDESC, BWI_RING_ALIGN);
1739 * Create top level DMA tag
1741 error = bus_dma_tag_create(NULL, BWI_ALIGN, 0,
1742 lowaddr, BUS_SPACE_MAXADDR,
1745 BUS_SPACE_UNRESTRICTED,
1746 BUS_SPACE_MAXSIZE_32BIT,
1747 0, &sc->sc_parent_dtag);
1749 device_printf(sc->sc_dev, "can't create parent DMA tag\n");
/* Per-ring control register base for ring 'idx'. */
1753 #define TXRX_CTRL(idx) (BWI_TXRX_CTRL_BASE + (idx) * txrx_ctrl_step)
1756 * Create TX ring DMA stuffs
1758 error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0,
1759 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1761 tx_ring_sz, 1, BUS_SPACE_MAXSIZE_32BIT,
1762 0, &sc->sc_txring_dtag);
1764 device_printf(sc->sc_dev, "can't create TX ring DMA tag\n");
1768 for (i = 0; i < BWI_TX_NRING; ++i) {
1769 error = bwi_dma_ring_alloc(sc, sc->sc_txring_dtag,
1770 &sc->sc_tx_rdata[i], tx_ring_sz,
1773 device_printf(sc->sc_dev, "%dth TX ring "
1774 "DMA alloc failed\n", i);
1780 * Create RX ring DMA stuffs
1782 error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0,
1783 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1785 rx_ring_sz, 1, BUS_SPACE_MAXSIZE_32BIT,
1786 0, &sc->sc_rxring_dtag);
1788 device_printf(sc->sc_dev, "can't create RX ring DMA tag\n");
/* RX data ring uses control base 0; TX status ring uses base 3. */
1792 error = bwi_dma_ring_alloc(sc, sc->sc_rxring_dtag, &sc->sc_rx_rdata,
1793 rx_ring_sz, TXRX_CTRL(0));
1795 device_printf(sc->sc_dev, "RX ring DMA alloc failed\n");
1800 error = bwi_dma_txstats_alloc(sc, TXRX_CTRL(3), desc_sz);
1802 device_printf(sc->sc_dev,
1803 "TX stats DMA alloc failed\n");
1810 return bwi_dma_mbuf_create(sc);
/*
 * bwi_dma_free: tear down everything bwi_dma_alloc() created -- the TX
 * rings and their tag, the RX ring and its tag, the TX status DMA
 * resources, all mbuf DMA maps, and finally the parent tag.
 */
1814 bwi_dma_free(struct bwi_softc *sc)
1816 if (sc->sc_txring_dtag != NULL) {
1819 for (i = 0; i < BWI_TX_NRING; ++i) {
1820 struct bwi_ring_data *rd = &sc->sc_tx_rdata[i];
1822 if (rd->rdata_desc != NULL) {
1823 bus_dmamap_unload(sc->sc_txring_dtag,
1825 bus_dmamem_free(sc->sc_txring_dtag,
1830 bus_dma_tag_destroy(sc->sc_txring_dtag);
1833 if (sc->sc_rxring_dtag != NULL) {
1834 struct bwi_ring_data *rd = &sc->sc_rx_rdata;
1836 if (rd->rdata_desc != NULL) {
1837 bus_dmamap_unload(sc->sc_rxring_dtag, rd->rdata_dmap);
1838 bus_dmamem_free(sc->sc_rxring_dtag, rd->rdata_desc,
1841 bus_dma_tag_destroy(sc->sc_rxring_dtag);
1844 bwi_dma_txstats_free(sc);
/* Destroy all TX-ring buf maps and the single RX buf map set. */
1845 bwi_dma_mbuf_destroy(sc, BWI_TX_NRING, 1);
1847 if (sc->sc_parent_dtag != NULL)
1848 bus_dma_tag_destroy(sc->sc_parent_dtag);
/*
 * bwi_dma_ring_alloc: allocate and load one descriptor ring.  Fills in
 * rd->rdata_desc/rdata_paddr and records the ring's control register
 * base in rd->rdata_txrx_ctrl.  On load failure the memory is freed and
 * rdata_desc reset to NULL so bwi_dma_free() can skip it safely.
 */
1852 bwi_dma_ring_alloc(struct bwi_softc *sc, bus_dma_tag_t dtag,
1853 struct bwi_ring_data *rd, bus_size_t size,
1858 error = bus_dmamem_alloc(dtag, &rd->rdata_desc,
1859 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1862 device_printf(sc->sc_dev, "can't allocate DMA mem\n");
1866 error = bus_dmamap_load(dtag, rd->rdata_dmap, rd->rdata_desc, size,
1867 bwi_dma_ring_addr, &rd->rdata_paddr,
1870 device_printf(sc->sc_dev, "can't load DMA mem\n");
1871 bus_dmamem_free(dtag, rd->rdata_desc, rd->rdata_dmap);
1872 rd->rdata_desc = NULL;
1876 rd->rdata_txrx_ctrl = txrx_ctrl;
/*
 * bwi_dma_txstats_alloc: allocate the TX status machinery -- a softc-
 * owned bwi_txstats_data, a descriptor ring (tag + mem + load) and the
 * TX status buffer array (tag + mem + load).  Each failure path undoes
 * its own tag/memory and NULLs the tag pointer so the free routine can
 * detect how far allocation got.  ctrl_base is the ring's register base.
 */
1881 bwi_dma_txstats_alloc(struct bwi_softc *sc, uint32_t ctrl_base,
1884 struct bwi_txstats_data *st;
1885 bus_size_t dma_size;
1888 st = kmalloc(sizeof(*st), M_DEVBUF, M_WAITOK | M_ZERO);
1889 sc->sc_txstats = st;
1892 * Create TX stats descriptor DMA stuffs
1894 dma_size = roundup(desc_sz * BWI_TXSTATS_NDESC, BWI_RING_ALIGN);
1896 error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_RING_ALIGN, 0,
1897 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1899 dma_size, 1, BUS_SPACE_MAXSIZE_32BIT,
1900 0, &st->stats_ring_dtag);
1902 device_printf(sc->sc_dev, "can't create txstats ring "
1907 error = bus_dmamem_alloc(st->stats_ring_dtag, &st->stats_ring,
1908 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1909 &st->stats_ring_dmap);
1911 device_printf(sc->sc_dev, "can't allocate txstats ring "
1913 bus_dma_tag_destroy(st->stats_ring_dtag);
1914 st->stats_ring_dtag = NULL;
1918 error = bus_dmamap_load(st->stats_ring_dtag, st->stats_ring_dmap,
1919 st->stats_ring, dma_size,
1920 bwi_dma_ring_addr, &st->stats_ring_paddr,
1923 device_printf(sc->sc_dev, "can't load txstats ring DMA mem\n");
1924 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring,
1925 st->stats_ring_dmap);
1926 bus_dma_tag_destroy(st->stats_ring_dtag);
1927 st->stats_ring_dtag = NULL;
1932 * Create TX stats DMA stuffs
1934 dma_size = roundup(sizeof(struct bwi_txstats) * BWI_TXSTATS_NDESC,
1937 error = bus_dma_tag_create(sc->sc_parent_dtag, BWI_ALIGN, 0,
1938 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1940 dma_size, 1, BUS_SPACE_MAXSIZE_32BIT,
1941 0, &st->stats_dtag);
1943 device_printf(sc->sc_dev, "can't create txstats DMA tag\n");
1947 error = bus_dmamem_alloc(st->stats_dtag, (void **)&st->stats,
1948 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1951 device_printf(sc->sc_dev, "can't allocate txstats DMA mem\n");
1952 bus_dma_tag_destroy(st->stats_dtag);
1953 st->stats_dtag = NULL;
1957 error = bus_dmamap_load(st->stats_dtag, st->stats_dmap, st->stats,
1958 dma_size, bwi_dma_ring_addr, &st->stats_paddr,
1961 device_printf(sc->sc_dev, "can't load txstats DMA mem\n");
1962 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap);
1963 bus_dma_tag_destroy(st->stats_dtag);
1964 st->stats_dtag = NULL;
1968 st->stats_ctrl_base = ctrl_base;
/*
 * bwi_dma_txstats_free: release TX status DMA resources.  Returns
 * immediately if nothing was allocated; otherwise frees the status
 * descriptor ring and status buffers (each guarded by its tag pointer,
 * which bwi_dma_txstats_alloc() NULLs on partial failure) and the
 * bwi_txstats_data itself.
 */
1973 bwi_dma_txstats_free(struct bwi_softc *sc)
1975 struct bwi_txstats_data *st;
1977 if (sc->sc_txstats == NULL)
1979 st = sc->sc_txstats;
1981 if (st->stats_ring_dtag != NULL) {
1982 bus_dmamap_unload(st->stats_ring_dtag, st->stats_ring_dmap);
1983 bus_dmamem_free(st->stats_ring_dtag, st->stats_ring,
1984 st->stats_ring_dmap);
1985 bus_dma_tag_destroy(st->stats_ring_dtag);
1988 if (st->stats_dtag != NULL) {
1989 bus_dmamap_unload(st->stats_dtag, st->stats_dmap);
1990 bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap);
1991 bus_dma_tag_destroy(st->stats_dtag);
1994 kfree(st, M_DEVBUF);
/*
 * bwi_dma_ring_addr: bus_dmamap_load() callback; stores the single
 * segment's bus address into the bus_addr_t pointed to by 'arg'.
 */
1998 bwi_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
2000 KASSERT(nseg == 1, ("too many segments\n"));
2001 *((bus_addr_t *)arg) = seg->ds_addr;
/*
 * bwi_dma_mbuf_create: create the shared mbuf DMA tag plus one DMA map
 * per TX descriptor on every TX ring, one per RX descriptor, and a
 * spare RX map used for safe buffer replacement.  On failure, maps
 * created so far are destroyed (partially via bwi_dma_mbuf_destroy).
 * NOTE(review): listing elided; 'ntx' bookkeeping between lines is not
 * fully visible.
 */
2005 bwi_dma_mbuf_create(struct bwi_softc *sc)
2007 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
2008 int i, j, k, ntx, error;
2011 * Create TX/RX mbuf DMA tag
2013 error = bus_dma_tag_create(sc->sc_parent_dtag, 1, 0,
2014 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2015 NULL, NULL, MCLBYTES, 1,
2016 BUS_SPACE_MAXSIZE_32BIT,
2017 0, &sc->sc_buf_dtag);
2019 device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
2026 * Create TX mbuf DMA map
2028 for (i = 0; i < BWI_TX_NRING; ++i) {
2029 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
2031 for (j = 0; j < BWI_TX_NDESC; ++j) {
2032 error = bus_dmamap_create(sc->sc_buf_dtag, 0,
2033 &tbd->tbd_buf[j].tb_dmap);
2035 device_printf(sc->sc_dev, "can't create "
2036 "%dth tbd, %dth DMA map\n", i, j);
/* Unwind the maps already created on this ring. */
2039 for (k = 0; k < j; ++k) {
2040 bus_dmamap_destroy(sc->sc_buf_dtag,
2041 tbd->tbd_buf[k].tb_dmap);
2050 * Create RX mbuf DMA map and a spare DMA map
2052 error = bus_dmamap_create(sc->sc_buf_dtag, 0,
2053 &rbd->rbd_tmp_dmap);
2055 device_printf(sc->sc_dev,
2056 "can't create spare RX buf DMA map\n");
2060 for (j = 0; j < BWI_RX_NDESC; ++j) {
2061 error = bus_dmamap_create(sc->sc_buf_dtag, 0,
2062 &rbd->rbd_buf[j].rb_dmap);
2064 device_printf(sc->sc_dev, "can't create %dth "
2065 "RX buf DMA map\n", j);
/* NOTE(review): unwind indexes rbd_buf[j] here; presumably it
 * should be rbd_buf[k] -- compare the TX unwind above.  Cannot
 * confirm from this elided listing; verify against full source. */
2067 for (k = 0; k < j; ++k) {
2068 bus_dmamap_destroy(sc->sc_buf_dtag,
2069 rbd->rbd_buf[j].rb_dmap);
2071 bus_dmamap_destroy(sc->sc_buf_dtag,
2079 bwi_dma_mbuf_destroy(sc, ntx, 0);
/*
 * bwi_dma_mbuf_destroy: destroy mbuf DMA maps for the first 'ntx' TX
 * rings and, when 'nrx' is non-zero, the RX maps and spare map; loaded
 * mbufs are unloaded and freed, held nodes released.  Finally destroys
 * the shared buf tag.  No-op if the tag was never created.
 */
2084 bwi_dma_mbuf_destroy(struct bwi_softc *sc, int ntx, int nrx)
2088 if (sc->sc_buf_dtag == NULL)
2091 for (i = 0; i < ntx; ++i) {
2092 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
2094 for (j = 0; j < BWI_TX_NDESC; ++j) {
2095 struct bwi_txbuf *tb = &tbd->tbd_buf[j];
2097 if (tb->tb_mbuf != NULL) {
2098 bus_dmamap_unload(sc->sc_buf_dtag,
2100 m_freem(tb->tb_mbuf);
2102 if (tb->tb_ni != NULL)
2103 ieee80211_free_node(tb->tb_ni);
2104 bus_dmamap_destroy(sc->sc_buf_dtag, tb->tb_dmap);
2109 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
2111 bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap);
2112 for (j = 0; j < BWI_RX_NDESC; ++j) {
2113 struct bwi_rxbuf *rb = &rbd->rbd_buf[j];
2115 if (rb->rb_mbuf != NULL) {
2116 bus_dmamap_unload(sc->sc_buf_dtag,
2118 m_freem(rb->rb_mbuf);
2120 bus_dmamap_destroy(sc->sc_buf_dtag, rb->rb_dmap);
2124 bus_dma_tag_destroy(sc->sc_buf_dtag);
2125 sc->sc_buf_dtag = NULL;
/* bwi_enable_intrs: set the given bits in the MAC interrupt mask. */
2129 bwi_enable_intrs(struct bwi_softc *sc, uint32_t enable_intrs)
2131 CSR_SETBITS_4(sc, BWI_MAC_INTR_MASK, enable_intrs);
/* bwi_disable_intrs: clear the given bits in the MAC interrupt mask. */
2135 bwi_disable_intrs(struct bwi_softc *sc, uint32_t disable_intrs)
2137 CSR_CLRBITS_4(sc, BWI_MAC_INTR_MASK, disable_intrs);
/*
 * bwi_init_tx_ring32: initialize one 32-bit TX descriptor ring -- zero
 * the descriptors, sync them for the device, then program the ring base
 * (low address + FUNC bits) and the control register (high address bits
 * + ENABLE) through the ring's control window.
 */
2141 bwi_init_tx_ring32(struct bwi_softc *sc, int ring_idx)
2143 struct bwi_ring_data *rd;
2144 struct bwi_txbuf_data *tbd;
2145 uint32_t val, addr_hi, addr_lo;
2147 KKASSERT(ring_idx < BWI_TX_NRING);
2148 rd = &sc->sc_tx_rdata[ring_idx];
2149 tbd = &sc->sc_tx_bdata[ring_idx];
2154 bzero(rd->rdata_desc, sizeof(struct bwi_desc32) * BWI_TX_NDESC);
2155 bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
2156 BUS_DMASYNC_PREWRITE);
/* Split the ring's bus address into the RINGINFO/CTRL fields. */
2158 addr_lo = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
2159 addr_hi = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);
2161 val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
2162 __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
2163 BWI_TXRX32_RINGINFO_FUNC_MASK);
2164 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, val);
2166 val = __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
2167 BWI_TXRX32_CTRL_ENABLE;
2168 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, val);
/*
 * bwi_init_rxdesc_ring32: program a 32-bit RX-style descriptor ring
 * (used for both the RX data ring and the TX status ring).  Writes the
 * ring base, the control word (header size, high address bits, ENABLE)
 * and seeds the RX index to the last descriptor so the whole ring is
 * available to the hardware.
 */
2174 bwi_init_rxdesc_ring32(struct bwi_softc *sc, uint32_t ctrl_base,
2175 bus_addr_t paddr, int hdr_size, int ndesc)
2177 uint32_t val, addr_hi, addr_lo;
2179 addr_lo = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
2180 addr_hi = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);
2182 val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
2183 __SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
2184 BWI_TXRX32_RINGINFO_FUNC_MASK);
2185 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_RINGINFO, val);
2187 val = __SHIFTIN(hdr_size, BWI_RX32_CTRL_HDRSZ_MASK) |
2188 __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
2189 BWI_TXRX32_CTRL_ENABLE;
2190 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_CTRL, val);
/* Index is in bytes: (ndesc - 1) descriptors' worth. */
2192 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
2193 (ndesc - 1) * sizeof(struct bwi_desc32));
/*
 * bwi_init_rx_ring32: reset the RX buffer index, allocate an mbuf for
 * every RX descriptor (bwi_newbuf with init=1), sync the ring, and
 * program the hardware RX ring with an RX-buffer-header-sized header.
 */
2197 bwi_init_rx_ring32(struct bwi_softc *sc)
2199 struct bwi_ring_data *rd = &sc->sc_rx_rdata;
2202 sc->sc_rx_bdata.rbd_idx = 0;
2204 for (i = 0; i < BWI_RX_NDESC; ++i) {
2205 error = bwi_newbuf(sc, i, 1);
2207 if_printf(&sc->sc_ic.ic_if,
2208 "can't allocate %dth RX buffer\n", i);
2212 bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
2213 BUS_DMASYNC_PREWRITE);
2215 bwi_init_rxdesc_ring32(sc, rd->rdata_txrx_ctrl, rd->rdata_paddr,
2216 sizeof(struct bwi_rxbuf_hdr), BWI_RX_NDESC);
/*
 * bwi_init_txstats32: zero the TX status buffers, point each status
 * ring descriptor at consecutive bwi_txstats slots, sync both areas,
 * then program the status ring as an RX-style ring with no header.
 */
2221 bwi_init_txstats32(struct bwi_softc *sc)
2223 struct bwi_txstats_data *st = sc->sc_txstats;
2224 bus_addr_t stats_paddr;
2227 bzero(st->stats, BWI_TXSTATS_NDESC * sizeof(struct bwi_txstats));
2228 bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_PREWRITE);
2232 stats_paddr = st->stats_paddr;
2233 for (i = 0; i < BWI_TXSTATS_NDESC; ++i) {
2234 bwi_setup_desc32(sc, st->stats_ring, BWI_TXSTATS_NDESC, i,
2235 stats_paddr, sizeof(struct bwi_txstats), 0);
2236 stats_paddr += sizeof(struct bwi_txstats);
2238 bus_dmamap_sync(st->stats_ring_dtag, st->stats_ring_dmap,
2239 BUS_DMASYNC_PREWRITE);
/* hdr_size 0: status records carry no RX-buffer header. */
2241 bwi_init_rxdesc_ring32(sc, st->stats_ctrl_base,
2242 st->stats_ring_paddr, 0, BWI_TXSTATS_NDESC);
/*
 * bwi_setup_rx_desc32: fill in the 32-bit RX descriptor at buf_idx with
 * the given buffer bus address (thin wrapper over bwi_setup_desc32).
 */
2247 bwi_setup_rx_desc32(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
2250 struct bwi_ring_data *rd = &sc->sc_rx_rdata;
2252 KKASSERT(buf_idx < BWI_RX_NDESC);
2253 bwi_setup_desc32(sc, rd->rdata_desc, BWI_RX_NDESC, buf_idx,
/*
 * bwi_setup_tx_desc32: fill in the 32-bit TX descriptor at buf_idx of
 * ring 'rd' with the buffer address/length (wraps bwi_setup_desc32).
 */
2258 bwi_setup_tx_desc32(struct bwi_softc *sc, struct bwi_ring_data *rd,
2259 int buf_idx, bus_addr_t paddr, int buf_len)
2261 KKASSERT(buf_idx < BWI_TX_NDESC);
2262 bwi_setup_desc32(sc, rd->rdata_desc, BWI_TX_NDESC, buf_idx,
/*
 * 64-bit descriptor scheme entry points -- unimplemented stubs that
 * mirror the 32-bit functions installed by bwi_dma_alloc().
 */
2267 bwi_init_tx_ring64(struct bwi_softc *sc, int ring_idx)
2274 bwi_init_rx_ring64(struct bwi_softc *sc)
2281 bwi_init_txstats64(struct bwi_softc *sc)
2288 bwi_setup_rx_desc64(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
2295 bwi_setup_tx_desc64(struct bwi_softc *sc, struct bwi_ring_data *rd,
2296 int buf_idx, bus_addr_t paddr, int buf_len)
/*
 * bwi_dma_buf_addr: bus_dmamap_load_mbuf() callback; records the single
 * segment's bus address into the bus_addr_t pointed to by 'arg'.
 */
2302 bwi_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg,
2303 bus_size_t mapsz __unused, int error)
2306 KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
2307 *((bus_addr_t *)arg) = seg->ds_addr;
/*
 * bwi_newbuf: attach a fresh mbuf cluster to RX slot buf_idx.  The new
 * buffer is first loaded into the spare DMA map; only on success is the
 * old buffer's map unloaded and the maps swapped, so a failed
 * allocation never loses the existing buffer.  'init' selects blocking
 * allocation and is set during ring initialization.
 * NOTE(review): listing elided -- error-return statements between the
 * numbered lines are missing from view.
 */
2312 bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init)
2314 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
2315 struct bwi_rxbuf *rxbuf = &rbd->rbd_buf[buf_idx];
2316 struct bwi_rxbuf_hdr *hdr;
2322 KKASSERT(buf_idx < BWI_RX_NDESC);
2324 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2329 * If the NIC is up and running, we need to:
2330 * - Clear RX buffer's header.
2331 * - Restore RX descriptor settings.
2338 m->m_len = m->m_pkthdr.len = MCLBYTES;
2341 * Try to load RX buf into temporary DMA map
2343 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, rbd->rbd_tmp_dmap, m,
2344 bwi_dma_buf_addr, &paddr,
2345 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2350 * See the comment above
/* Load succeeded: detach the old buffer's mapping. */
2359 bus_dmamap_unload(sc->sc_buf_dtag, rxbuf->rb_dmap);
2361 rxbuf->rb_paddr = paddr;
2364 * Swap RX buf's DMA map with the loaded temporary one
2366 map = rxbuf->rb_dmap;
2367 rxbuf->rb_dmap = rbd->rbd_tmp_dmap;
2368 rbd->rbd_tmp_dmap = map;
2372 * Clear RX buf header
2374 hdr = mtod(rxbuf->rb_mbuf, struct bwi_rxbuf_hdr *);
2375 bzero(hdr, sizeof(*hdr));
2376 bus_dmamap_sync(sc->sc_buf_dtag, rxbuf->rb_dmap, BUS_DMASYNC_PREWRITE);
2379 * Setup RX buf descriptor
2381 sc->sc_setup_rxdesc(sc, buf_idx, rxbuf->rb_paddr,
2382 rxbuf->rb_mbuf->m_len - sizeof(*hdr));
/*
 * bwi_set_addr_filter: program a MAC address into the hardware address
 * filter at the given offset, writing it 16 bits at a time in
 * little-endian byte order via the filter CTRL/DATA register pair.
 */
2387 bwi_set_addr_filter(struct bwi_softc *sc, uint16_t addr_ofs,
2388 const uint8_t *addr)
2392 CSR_WRITE_2(sc, BWI_ADDR_FILTER_CTRL,
2393 BWI_ADDR_FILTER_CTRL_SET | addr_ofs);
2395 for (i = 0; i < (IEEE80211_ADDR_LEN / 2); ++i) {
2398 addr_val = (uint16_t)addr[i * 2] |
2399 (((uint16_t)addr[(i * 2) + 1]) << 8);
2400 CSR_WRITE_2(sc, BWI_ADDR_FILTER_DATA, addr_val);
/*
 * bwi_set_chan: tune the radio of the current MAC to channel 'c'.
 * Converts the channel structure to an IEEE channel number and hands it
 * to the RF layer.  Caller must hold the interface serializer.
 */
2405 bwi_set_chan(struct bwi_softc *sc, struct ieee80211_channel *c)
2407 struct ieee80211com *ic = &sc->sc_ic;
2408 struct ifnet *ifp = &ic->ic_if;
2409 struct bwi_mac *mac;
2412 ASSERT_SERIALIZED(ifp->if_serializer);
2414 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
2415 mac = (struct bwi_mac *)sc->sc_cur_regwin;
2417 chan = ieee80211_chan2ieee(ic, c);
2419 bwi_rf_set_chan(mac, chan, 0);
2421 /* TODO: radio tap */
/*
 * bwi_next_scan: scan-dwell callout handler.  Under the interface
 * serializer, advances to the next scan channel if still scanning.
 */
2427 bwi_next_scan(void *xsc)
2429 struct bwi_softc *sc = xsc;
2430 struct ieee80211com *ic = &sc->sc_ic;
2431 struct ifnet *ifp = &ic->ic_if;
2433 lwkt_serialize_enter(ifp->if_serializer);
2435 if (ic->ic_state == IEEE80211_S_SCAN)
2436 ieee80211_next_scan(ic);
2438 lwkt_serialize_exit(ifp->if_serializer);
/*
 * bwi_rxeof: drain received frames from the RX ring up to (not
 * including) end_idx.  For each slot: sync the buffer, replace it with
 * a fresh one (dropping the frame if replacement fails), parse the
 * bwi_rxbuf_hdr (buffer length, flags, PLCP byte), compute RSSI, strip
 * the header + PLCP prefix and trailing CRC, then feed the 802.11
 * frame to net80211.  Finally re-syncs the descriptor ring.
 */
2442 bwi_rxeof(struct bwi_softc *sc, int end_idx)
2444 struct bwi_ring_data *rd = &sc->sc_rx_rdata;
2445 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
2446 struct ieee80211com *ic = &sc->sc_ic;
2447 struct ifnet *ifp = &ic->ic_if;
2451 while (idx != end_idx) {
2452 struct bwi_rxbuf *rb = &rbd->rbd_buf[idx];
2453 struct bwi_rxbuf_hdr *hdr;
2454 struct ieee80211_frame_min *wh;
2455 struct ieee80211_node *ni;
2457 uint8_t plcp_signal;
2459 int buflen, wh_ofs, hdr_extra, rssi;
2462 bus_dmamap_sync(sc->sc_buf_dtag, rb->rb_dmap,
2463 BUS_DMASYNC_POSTREAD);
/* Swap in a fresh buffer; on failure the frame is dropped. */
2465 if (bwi_newbuf(sc, idx, 0)) {
2470 hdr = mtod(m, struct bwi_rxbuf_hdr *);
2471 flags2 = le16toh(hdr->rxh_flags2);
2474 if (flags2 & BWI_RXH_F2_TYPE2FRAME)
/* 802.11 header starts after the PLCP (6 bytes) + extra pad. */
2476 wh_ofs = hdr_extra + 6;
2478 buflen = le16toh(hdr->rxh_buflen);
2479 if (buflen <= wh_ofs) {
2480 if_printf(ifp, "zero length data, hdr_extra %d\n",
2487 plcp_signal = *((uint8_t *)(hdr + 1) + hdr_extra);
2488 rssi = bwi_calc_rssi(sc, hdr) - BWI_NOISE_FLOOR;
2490 m->m_pkthdr.rcvif = ifp;
2491 m->m_len = m->m_pkthdr.len = buflen + sizeof(*hdr);
2492 m_adj(m, sizeof(*hdr) + wh_ofs);
2494 /* TODO: radio tap */
2496 m_adj(m, -IEEE80211_CRC_LEN);
2498 wh = mtod(m, struct ieee80211_frame_min *);
2499 ni = ieee80211_find_rxnode(ic, wh);
2501 ieee80211_input(ic, m, ni, rssi, le16toh(hdr->rxh_tsf));
2502 ieee80211_free_node(ni);
2504 idx = (idx + 1) % BWI_RX_NDESC;
2508 bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
2509 BUS_DMASYNC_PREWRITE);
/*
 * bwi_rxeof32: 32-bit RX completion.  Reads the hardware RX index
 * (byte offset -> descriptor index), processes frames up to it with
 * bwi_rxeof(), then writes the index back to acknowledge.
 */
2513 bwi_rxeof32(struct bwi_softc *sc)
2515 uint32_t val, rx_ctrl;
2518 rx_ctrl = sc->sc_rx_rdata.rdata_txrx_ctrl;
2520 val = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
2521 end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
2522 sizeof(struct bwi_desc32);
2524 bwi_rxeof(sc, end_idx);
2526 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_INDEX,
2527 end_idx * sizeof(struct bwi_desc32));
/* bwi_rxeof64: 64-bit RX completion -- unimplemented stub. */
2531 bwi_rxeof64(struct bwi_softc *sc)
/*
 * bwi_reset_rx_ring32: disable an RX-style ring (RX data or TX status),
 * poll up to NRETRY times for the DISABLED state, warn on timeout, then
 * clear the ring base register.
 */
2537 bwi_reset_rx_ring32(struct bwi_softc *sc, uint32_t rx_ctrl)
2541 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_CTRL, 0);
2545 for (i = 0; i < NRETRY; ++i) {
2548 status = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
2549 if (__SHIFTOUT(status, BWI_RX32_STATUS_STATE_MASK) ==
2550 BWI_RX32_STATUS_STATE_DISABLED)
2556 if_printf(&sc->sc_ic.ic_if, "reset rx ring timedout\n");
2560 CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_RINGINFO, 0);
/* bwi_free_txstats32: stop the TX status ring (it is RX-style). */
2564 bwi_free_txstats32(struct bwi_softc *sc)
2566 bwi_reset_rx_ring32(sc, sc->sc_txstats->stats_ctrl_base);
/*
 * bwi_free_rx_ring32: stop the RX ring in hardware, then unload and
 * free every RX mbuf still attached to the ring.
 */
2570 bwi_free_rx_ring32(struct bwi_softc *sc)
2572 struct bwi_ring_data *rd = &sc->sc_rx_rdata;
2573 struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
2576 bwi_reset_rx_ring32(sc, rd->rdata_txrx_ctrl);
2578 for (i = 0; i < BWI_RX_NDESC; ++i) {
2579 struct bwi_rxbuf *rb = &rbd->rbd_buf[i];
2581 if (rb->rb_mbuf != NULL) {
2582 bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap);
2583 m_freem(rb->rb_mbuf);
/*
 * bwi_free_tx_ring32: stop one TX ring and free its buffers.  First
 * waits for the ring state to settle (DISABLED/IDLE/STOPPED), then
 * clears the control register and waits for DISABLED, clears the ring
 * base, and finally unloads/frees any mbufs and node references left
 * on the ring.  Timeouts are reported but not fatal.
 */
2590 bwi_free_tx_ring32(struct bwi_softc *sc, int ring_idx)
2592 struct bwi_ring_data *rd;
2593 struct bwi_txbuf_data *tbd;
2594 struct ifnet *ifp = &sc->sc_ic.ic_if;
2595 uint32_t state, val;
2598 KKASSERT(ring_idx < BWI_TX_NRING);
2599 rd = &sc->sc_tx_rdata[ring_idx];
2600 tbd = &sc->sc_tx_bdata[ring_idx];
/* Wait for any in-flight DMA to reach a stable state. */
2604 for (i = 0; i < NRETRY; ++i) {
2605 val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
2606 state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
2607 if (state == BWI_TX32_STATUS_STATE_DISABLED ||
2608 state == BWI_TX32_STATUS_STATE_IDLE ||
2609 state == BWI_TX32_STATUS_STATE_STOPPED)
2615 if_printf(ifp, "wait for TX ring(%d) stable timed out\n",
/* Now actually disable the ring and wait for it to go DISABLED. */
2619 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, 0);
2620 for (i = 0; i < NRETRY; ++i) {
2621 val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
2622 state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
2623 if (state == BWI_TX32_STATUS_STATE_DISABLED)
2629 if_printf(ifp, "reset TX ring (%d) timed out\n", ring_idx);
2635 CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, 0);
2637 for (i = 0; i < BWI_TX_NDESC; ++i) {
2638 struct bwi_txbuf *tb = &tbd->tbd_buf[i];
2640 if (tb->tb_mbuf != NULL) {
2641 bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
2642 m_freem(tb->tb_mbuf);
2645 if (tb->tb_ni != NULL) {
2646 ieee80211_free_node(tb->tb_ni);
/* 64-bit scheme free routines -- unimplemented stubs. */
2653 bwi_free_txstats64(struct bwi_softc *sc)
2659 bwi_free_rx_ring64(struct bwi_softc *sc)
2665 bwi_free_tx_ring64(struct bwi_softc *sc, int ring_idx)
2670 /* XXX does not belong here */
/*
 * bwi_rate2plcp: map an 802.11 rate (in 500kbps units, CCK and OFDM)
 * to its PLCP SIGNAL value; panics on an unsupported rate.
 * NOTE(review): listing elided -- the switch statement framing and at
 * least one rate case are missing between the numbered lines.
 */
2672 bwi_rate2plcp(uint8_t rate)
2674 rate &= IEEE80211_RATE_VAL;
2678 case 4: return 0x14;
2679 case 11: return 0x37;
2680 case 22: return 0x6e;
2681 case 44: return 0xdc;
2683 case 12: return 0xb;
2684 case 18: return 0xf;
2685 case 24: return 0xa;
2686 case 36: return 0xe;
2687 case 48: return 0x9;
2688 case 72: return 0xd;
2689 case 96: return 0x8;
2690 case 108: return 0xc;
2693 panic("unsupported rate %u\n", rate);
/*
 * bwi_ofdm_plcp_header: build the 32-bit OFDM PLCP word (rate SIGNAL in
 * bits 0-3, packet length in bits 5-16) and store it little-endian.
 */
2697 static __inline void
2698 bwi_ofdm_plcp_header(uint32_t *plcp0, int pkt_len, uint8_t rate)
2700 /* XXX does not belong here */
2701 #define IEEE80211_OFDM_PLCP_SIG_MASK __BITS(3, 0)
2702 #define IEEE80211_OFDM_PLCP_LEN_MASK __BITS(16, 5)
2706 plcp = __SHIFTIN(bwi_rate2plcp(rate), IEEE80211_OFDM_PLCP_SIG_MASK) |
2707 __SHIFTIN(pkt_len, IEEE80211_OFDM_PLCP_LEN_MASK);
2708 *plcp0 = htole32(plcp);
2711 /* XXX does not belong here */
/*
 * DS (CCK) PLCP header layout and SERVICE field bits.
 * NOTE(review): "IEEE80211_DS_PLCL_SERVICE_PBCC" looks like a typo for
 * ..._PLCP_... -- left as-is in case elided code references this
 * spelling; verify against the full source before renaming.
 */
2712 struct ieee80211_ds_plcp_hdr {
2719 #define IEEE80211_DS_PLCP_SERVICE_LOCKED 0x04
2720 #define IEEE80211_DS_PLCL_SERVICE_PBCC 0x08
2721 #define IEEE80211_DS_PLCP_SERVICE_LENEXT5 0x20
2722 #define IEEE80211_DS_PLCP_SERVICE_LENEXT6 0x40
2723 #define IEEE80211_DS_PLCP_SERVICE_LENEXT7 0x80
/*
 * bwi_ds_plcp_header: fill a DS (CCK) PLCP header.  LENGTH is the TX
 * time in microseconds (bits * 2 / rate-in-500kbps-units); at 11Mbps
 * the LENEXT7 service bit disambiguates the rounded-up length.
 * The i_crc field is left untouched (hardware fills it).
 */
2725 static __inline void
2726 bwi_ds_plcp_header(struct ieee80211_ds_plcp_hdr *plcp, int pkt_len,
2729 int len, service, pkt_bitlen;
2731 pkt_bitlen = pkt_len * NBBY;
2732 len = howmany(pkt_bitlen * 2, rate);
2734 service = IEEE80211_DS_PLCP_SERVICE_LOCKED;
2735 if (rate == (11 * 2)) {
2739 * PLCP service field needs to be adjusted,
2740 * if TX rate is 11Mbytes/s
2742 pkt_bitlen1 = len * 11;
2743 if (pkt_bitlen1 - pkt_bitlen >= NBBY)
2744 service |= IEEE80211_DS_PLCP_SERVICE_LENEXT7;
2747 plcp->i_signal = bwi_rate2plcp(rate);
2748 plcp->i_service = service;
2749 plcp->i_length = htole16(len);
2750 /* NOTE: do NOT touch i_crc */
/*
 * bwi_plcp_header: dispatch to the OFDM or DS PLCP builder according to
 * the rate's modulation type; panics on any other modulation.  The
 * caller must pass a zeroed 'plcp' buffer.
 */
2753 static __inline void
2754 bwi_plcp_header(void *plcp, int pkt_len, uint8_t rate)
2756 enum ieee80211_modtype modtype;
2759 * Assume caller has zeroed 'plcp'
2762 modtype = ieee80211_rate2modtype(rate);
2763 if (modtype == IEEE80211_MODTYPE_OFDM)
2764 bwi_ofdm_plcp_header(plcp, pkt_len, rate);
2765 else if (modtype == IEEE80211_MODTYPE_DS)
2766 bwi_ds_plcp_header(plcp, pkt_len, rate);
2768 panic("unsupport modulation type %u\n", modtype);
/*
 * bwi_encap: encapsulate an 802.11 frame for transmission on the data
 * ring at slot 'idx'.  Chooses primary and fallback TX rates, prepends
 * and fills the device bwi_txbuf_hdr (PLCP words, fallback duration,
 * ring/slot id, PHY and MAC control), DMA-loads the mbuf (defragmenting
 * once on EFBIG), writes the TX descriptor and kicks the ring.
 * NOTE(review): listing is elided -- error-return paths and some rate
 * selection lines are missing between the numbered lines.
 */
2772 bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
2773 struct ieee80211_node *ni)
2775 struct ieee80211com *ic = &sc->sc_ic;
2776 struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING];
2777 struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
2778 struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
2779 struct bwi_mac *mac;
2780 struct bwi_txbuf_hdr *hdr;
2781 struct ieee80211_frame *wh;
2782 uint8_t rate, rate_fb;
2792 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
2793 mac = (struct bwi_mac *)sc->sc_cur_regwin;
2795 wh = mtod(m, struct ieee80211_frame *);
2797 /* Get 802.11 frame len before prepending TX header */
2798 pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN;
2803 bzero(tb->tb_rate_idx, sizeof(tb->tb_rate_idx));
2805 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2808 rate = IEEE80211_RS_RATE(&ni->ni_rates,
/* Fallback rate: one step below the fixed rate when possible. */
2811 if (ic->ic_fixed_rate >= 1)
2812 idx = ic->ic_fixed_rate - 1;
2815 rate_fb = IEEE80211_RS_RATE(&ni->ni_rates, idx);
2817 /* TODO: TX rate control */
2818 rate = rate_fb = (1 * 2);
2821 /* Fixed at 1Mbytes/s for mgt frames */
2822 rate = rate_fb = (1 * 2);
2825 if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2826 rate = rate_fb = ic->ic_mcast_rate;
2828 if (rate == 0 || rate_fb == 0) {
2829 if_printf(&ic->ic_if, "invalid rate %u or fallback rate %u",
2831 rate = rate_fb = (1 * 2); /* Force 1Mbytes/s */
2834 /* TODO: radio tap */
2837 * Setup the embedded TX header
2839 M_PREPEND(m, sizeof(*hdr), MB_DONTWAIT);
2841 if_printf(&ic->ic_if, "prepend TX header failed\n");
2844 hdr = mtod(m, struct bwi_txbuf_hdr *);
2846 bzero(hdr, sizeof(*hdr));
2848 bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc));
2849 bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1));
/* Unicast: precompute the ACK duration at the fallback rate. */
2851 if (ni != NULL && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2855 ack_rate = ieee80211_ack_rate(ni, rate_fb);
2856 dur = ieee80211_txtime(ni,
2857 sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN,
2858 ack_rate, ic->ic_flags & ~IEEE80211_F_SHPREAMBLE);
2860 hdr->txh_fb_duration = htole16(dur);
/* Encode ring + slot so TX status can find this buffer again. */
2863 hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) |
2864 __SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);
2866 bwi_plcp_header(hdr->txh_plcp, pkt_len, rate);
2867 bwi_plcp_header(hdr->txh_fb_plcp, pkt_len, rate_fb);
2869 phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode,
2870 BWI_TXH_PHY_C_ANTMODE_MASK);
2871 if (ieee80211_rate2modtype(rate) == IEEE80211_MODTYPE_OFDM)
2872 phy_ctrl |= BWI_TXH_PHY_C_OFDM;
2873 else if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (2 * 1))
2874 phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE;
2876 mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG;
2877 if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2878 mac_ctrl |= BWI_TXH_MAC_C_ACK;
2879 if (ieee80211_rate2modtype(rate_fb) == IEEE80211_MODTYPE_OFDM)
2880 mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM;
2882 hdr->txh_mac_ctrl = htole32(mac_ctrl);
2883 hdr->txh_phy_ctrl = htole16(phy_ctrl);
2885 /* Catch any further usage */
2890 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
2891 bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
2892 if (error && error != EFBIG) {
2893 if_printf(&ic->ic_if, "can't load TX buffer (1) %d\n", error);
/* EFBIG: too many segments -- defragment once and retry. */
2897 if (error) { /* error == EFBIG */
2900 m_new = m_defrag(m, MB_DONTWAIT);
2901 if (m_new == NULL) {
2902 if_printf(&ic->ic_if, "can't defrag TX buffer\n");
2909 error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
2910 bwi_dma_buf_addr, &paddr,
2913 if_printf(&ic->ic_if, "can't load TX buffer (2) %d\n",
2920 bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE);
2926 p = mtod(m, const uint8_t *);
2927 for (i = 0; i < m->m_pkthdr.len; ++i) {
2928 if (i != 0 && i % 8 == 0)
2930 kprintf("%02x ", p[i]);
2934 if_printf(&ic->ic_if, "idx %d, pkt_len %d, buflen %d\n",
2935 idx, pkt_len, m->m_pkthdr.len);
2938 /* Setup TX descriptor */
2939 sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len);
2940 bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
2941 BUS_DMASYNC_PREWRITE);
2944 sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx);
/*
 * bwi_start_tx32: kick 32-bit TX -- advance the hardware TX index past
 * the just-filled slot (index register is in descriptor-size bytes).
 */
2953 bwi_start_tx32(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
2955 idx = (idx + 1) % BWI_TX_NDESC;
2956 CSR_WRITE_4(sc, tx_ctrl + BWI_TX32_INDEX,
2957 idx * sizeof(struct bwi_desc32));
/* bwi_start_tx64: 64-bit TX kick -- unimplemented stub. */
2961 bwi_start_tx64(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
/*
 * bwi_txeof_status32: 32-bit TX-status completion.  Reads the status
 * ring's hardware index, processes completed statuses via
 * bwi_txeof_status(), acknowledges the index, and restarts output if
 * the interface is no longer flow-controlled.
 */
2967 bwi_txeof_status32(struct bwi_softc *sc)
2969 struct ifnet *ifp = &sc->sc_ic.ic_if;
2970 uint32_t val, ctrl_base;
2973 ctrl_base = sc->sc_txstats->stats_ctrl_base;
2975 val = CSR_READ_4(sc, ctrl_base + BWI_RX32_STATUS);
2976 end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
2977 sizeof(struct bwi_desc32);
2979 bwi_txeof_status(sc, end_idx);
2981 CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
2982 end_idx * sizeof(struct bwi_desc32));
2984 if ((ifp->if_flags & IFF_OACTIVE) == 0)
2989 bwi_txeof_status64(struct bwi_softc *sc)
/*
 * Tear down one completed TX buffer identified by 'tx_id' (the id the
 * driver stamped into the TX header): unload its DMA map, free the
 * mbuf and any associated node reference, and clear the TX watchdog /
 * OACTIVE state so output can continue.
 */
2995 _bwi_txeof(struct bwi_softc *sc, uint16_t tx_id)
2997 struct ifnet *ifp = &sc->sc_ic.ic_if;
2998 struct bwi_txbuf_data *tbd;
2999 struct bwi_txbuf *tb;
3000 int ring_idx, buf_idx;
/* tx_id of 0 is invalid; log it (the elided code presumably bails). */
3003 if_printf(ifp, "zero tx id\n");
/* Decode ring and buffer index packed into the TX id. */
3007 ring_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_RING_MASK);
3008 buf_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_IDX_MASK);
/* Only the data ring is used for TX at this revision. */
3010 KKASSERT(ring_idx == BWI_TX_DATA_RING);
3011 KKASSERT(buf_idx < BWI_TX_NDESC);
3013 if_printf(ifp, "txeof idx %d\n", buf_idx);
3016 tbd = &sc->sc_tx_bdata[ring_idx];
3017 KKASSERT(tbd->tbd_used > 0);
3020 tb = &tbd->tbd_buf[buf_idx];
/* Release DMA resources and the packet itself. */
3022 bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
3023 m_freem(tb->tb_mbuf);
/* Drop the 802.11 node reference taken at encap time. */
3026 if (tb->tb_ni != NULL) {
3027 ieee80211_free_node(tb->tb_ni);
/* All TX buffers reaped: disarm the TX watchdog. */
3031 if (tbd->tbd_used == 0)
3032 sc->sc_tx_timer = 0;
3034 ifp->if_flags &= ~IFF_OACTIVE;
/*
 * Walk the TX-status ring from the last software index up to (but not
 * including) 'end_idx', reaping each reported TX id, then record the
 * new software position.
 */
3038 bwi_txeof_status(struct bwi_softc *sc, int end_idx)
3040 struct bwi_txstats_data *st = sc->sc_txstats;
/* Make the DMA'ed status entries visible to the CPU before reading. */
3043 bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_POSTREAD);
3045 idx = st->stats_idx;
3046 while (idx != end_idx) {
/* Status entries are little-endian on the wire. */
3047 _bwi_txeof(sc, le16toh(st->stats[idx].txs_id));
3048 idx = (idx + 1) % BWI_TXSTATS_NDESC;
3050 st->stats_idx = idx;
/*
 * Reap completed transmissions reported through the TXSTATUS register
 * pair (the non-ring reporting path).  Reads status words until the
 * hardware reports none pending, then restarts output if possible.
 */
3054 bwi_txeof(struct bwi_softc *sc)
3056 struct ifnet *ifp = &sc->sc_ic.ic_if;
3059 uint32_t tx_status0, tx_status1;
3060 uint16_t tx_id, tx_info;
3062 tx_status0 = CSR_READ_4(sc, BWI_TXSTATUS_0);
/* Zero means no (more) TX status pending. */
3063 if (tx_status0 == 0)
/*
 * TXSTATUS_1 must be read to advance the hardware status FIFO,
 * even though its value is not otherwise used here.
 */
3065 tx_status1 = CSR_READ_4(sc, BWI_TXSTATUS_1);
3067 tx_id = __SHIFTOUT(tx_status0, BWI_TXSTATUS_0_TXID_MASK);
3068 tx_info = BWI_TXSTATUS_0_INFO(tx_status0);
/*
 * XXX magic mask carried over from the reference implementation;
 * presumably "frame still pending / intermediate status" bits —
 * confirm against the Broadcom status-word layout.
 */
3070 if (tx_info & 0x30) /* XXX */
3073 _bwi_txeof(sc, tx_id);
3076 if ((ifp->if_flags & IFF_OACTIVE) == 0)
/*
 * Power up the baseband processor: turn core power on (with PLL) and
 * select the requested clock mode.  Returns the result of the clock
 * mode switch (0 on success).
 */
3081 bwi_bbp_power_on(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
3083 bwi_power_on(sc, 1);
3084 return bwi_set_clock_mode(sc, clk_mode);
/*
 * Power down the baseband processor: drop to the slow clock first,
 * then cut core power (mirror image of bwi_bbp_power_on()).
 */
3088 bwi_bbp_power_off(struct bwi_softc *sc)
3090 bwi_set_clock_mode(sc, BWI_CLOCK_MODE_SLOW);
3091 bwi_power_off(sc, 1);
/*
 * Compute and cache the power-on delay (in microseconds) from the PLL
 * on-delay register and the minimum clock frequency.  Requires the
 * common register window; a no-op when the chip lacks clock-mode
 * control.  Returns 0 on success or an errno from the regwin switch.
 */
3095 bwi_get_pwron_delay(struct bwi_softc *sc)
3097 struct bwi_regwin *com, *old;
3098 struct bwi_clock_freq freq;
3102 com = &sc->sc_com_regwin;
3103 KKASSERT(BWI_REGWIN_EXIST(com));
/* Without clock-mode capability there is no delay to program. */
3105 if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
3108 error = bwi_regwin_switch(sc, com, &old);
3112 bwi_get_clock_freq(sc, &freq);
3114 val = CSR_READ_4(sc, BWI_PLL_ON_DELAY);
/*
 * delay_us = ceil((val + 2) ticks * 1e6 / min_clock_Hz); the "+2"
 * slack presumably accounts for synchronization cycles — from the
 * reference implementation.
 */
3115 sc->sc_pwron_delay = howmany((val + 2) * 1000000, freq.clkfreq_min);
3116 DPRINTF(sc, "power on delay %u\n", sc->sc_pwron_delay);
/* Restore the previously selected register window. */
3118 return bwi_regwin_switch(sc, old, NULL);
/*
 * Bring up the bus register window: enable it if it is not already
 * enabled, mask all backplane interrupts, and restore the previously
 * selected window.  Returns 0 on success or an errno from the regwin
 * switch.
 */
3122 bwi_bus_attach(struct bwi_softc *sc)
3124 struct bwi_regwin *bus, *old;
3127 bus = &sc->sc_bus_regwin;
3129 error = bwi_regwin_switch(sc, bus, &old);
3133 if (!bwi_regwin_is_enabled(sc, bus))
3134 bwi_regwin_enable(sc, bus, 0);
3136 /* Disable interrupts */
3137 CSR_WRITE_4(sc, BWI_INTRVEC, 0);
3139 return bwi_regwin_switch(sc, old, NULL);
/*
 * Map a register-window type code to a human-readable name for
 * diagnostics.  Panics on an unknown type (the elided case bodies
 * presumably return string literals for each known core type).
 */
3143 bwi_regwin_name(const struct bwi_regwin *rw)
3145 switch (rw->rw_type) {
3146 case BWI_REGWIN_T_COM:
3148 case BWI_REGWIN_T_BUSPCI:
3150 case BWI_REGWIN_T_MAC:
3152 case BWI_REGWIN_T_BUSPCIE:
3155 panic("unknown regwin type 0x%04x\n", rw->rw_type);
/*
 * Return the STATE_LO "disable" bit mask appropriate for this
 * backplane revision.  Revisions 0 and 1 each use a single specific
 * bit; later revisions use both.
 */
3160 bwi_regwin_disable_bits(struct bwi_softc *sc)
3164 /* XXX cache this */
3165 busrev = __SHIFTOUT(CSR_READ_4(sc, BWI_ID_LO), BWI_ID_LO_BUSREV_MASK);
3166 DPRINTF(sc, "bus rev %u\n", busrev);
3168 if (busrev == BWI_BUSREV_0)
3169 return BWI_STATE_LO_DISABLE1;
3170 else if (busrev == BWI_BUSREV_1)
3171 return BWI_STATE_LO_DISABLE2;
3173 return (BWI_STATE_LO_DISABLE1 | BWI_STATE_LO_DISABLE2);
/*
 * Test whether a register window (core) is fully enabled: the clock
 * bit must be set while reset and the revision-specific disable bits
 * are all clear.  'rw' is used only for the diagnostic name.
 */
3177 bwi_regwin_is_enabled(struct bwi_softc *sc, struct bwi_regwin *rw)
3179 uint32_t val, disable_bits;
3181 disable_bits = bwi_regwin_disable_bits(sc);
3182 val = CSR_READ_4(sc, BWI_STATE_LO);
/* Enabled == clock on AND not in reset AND not disabled. */
3184 if ((val & (BWI_STATE_LO_CLOCK |
3185 BWI_STATE_LO_RESET |
3186 disable_bits)) == BWI_STATE_LO_CLOCK) {
3187 DPRINTF(sc, "%s is enabled\n", bwi_regwin_name(rw));
3190 DPRINTF(sc, "%s is disabled\n", bwi_regwin_name(rw));
/*
 * Disable (reset) a register window (core) following the backplane
 * shutdown sequence: stop the normal clock, wait for the clock-stop
 * and BUSY handshakes, then assert reset — first with the gated clock
 * running, finally with all clocks off.  'flags' carries core-specific
 * bits merged into the STATE_LO flags field.
 */
3196 bwi_regwin_disable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
3198 uint32_t state_lo, disable_bits;
3201 state_lo = CSR_READ_4(sc, BWI_STATE_LO);
3204 * If current regwin is in 'reset' state, it was already disabled.
3206 if (state_lo & BWI_STATE_LO_RESET) {
3207 DPRINTF(sc, "%s was already disabled\n", bwi_regwin_name(rw));
3211 disable_bits = bwi_regwin_disable_bits(sc);
3214 * Disable normal clock
3216 state_lo = BWI_STATE_LO_CLOCK | disable_bits;
3217 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3220 * Wait until normal clock is disabled
/* Poll for the hardware to acknowledge the disable request. */
3223 for (i = 0; i < NRETRY; ++i) {
3224 state_lo = CSR_READ_4(sc, BWI_STATE_LO);
3225 if (state_lo & disable_bits)
/* Timeout is reported but the sequence continues regardless. */
3230 device_printf(sc->sc_dev, "%s disable clock timeout\n",
3231 bwi_regwin_name(rw));
/* Wait for any in-flight backplane transaction to drain. */
3234 for (i = 0; i < NRETRY; ++i) {
3237 state_hi = CSR_READ_4(sc, BWI_STATE_HI);
3238 if ((state_hi & BWI_STATE_HI_BUSY) == 0)
3243 device_printf(sc->sc_dev, "%s wait BUSY unset timeout\n",
3244 bwi_regwin_name(rw));
3249 * Reset and disable regwin with gated clock
3251 state_lo = BWI_STATE_LO_RESET | disable_bits |
3252 BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK |
3253 __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
3254 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3256 /* Flush pending bus write */
3257 CSR_READ_4(sc, BWI_STATE_LO);
3260 /* Reset and disable regwin */
3261 state_lo = BWI_STATE_LO_RESET | disable_bits |
3262 __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
3263 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3265 /* Flush pending bus write */
3266 CSR_READ_4(sc, BWI_STATE_LO);
/*
 * Enable a register window (core): first force it through the full
 * disable/reset sequence, then release reset in stages — gated clock
 * under reset, clear any latched error state, gated clock without
 * reset, and finally the normal clock.  'flags' carries core-specific
 * bits merged into the STATE_LO flags field at every stage.
 */
3271 bwi_regwin_enable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
3273 uint32_t state_lo, state_hi, imstate;
/* Start from a known (reset) state. */
3275 bwi_regwin_disable(sc, rw, flags);
3277 /* Reset regwin with gated clock */
3278 state_lo = BWI_STATE_LO_RESET |
3279 BWI_STATE_LO_CLOCK |
3280 BWI_STATE_LO_GATED_CLOCK |
3281 __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
3282 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3284 /* Flush pending bus write */
3285 CSR_READ_4(sc, BWI_STATE_LO);
/* Clear a latched slave error, if any. */
3288 state_hi = CSR_READ_4(sc, BWI_STATE_HI);
3289 if (state_hi & BWI_STATE_HI_SERROR)
3290 CSR_WRITE_4(sc, BWI_STATE_HI, 0);
/* Clear latched inband-error/timeout conditions. */
3292 imstate = CSR_READ_4(sc, BWI_IMSTATE);
3293 if (imstate & (BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT)) {
3294 imstate &= ~(BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT);
3295 CSR_WRITE_4(sc, BWI_IMSTATE, imstate);
3298 /* Enable regwin with gated clock */
3299 state_lo = BWI_STATE_LO_CLOCK |
3300 BWI_STATE_LO_GATED_CLOCK |
3301 __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
3302 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3304 /* Flush pending bus write */
3305 CSR_READ_4(sc, BWI_STATE_LO);
3308 /* Enable regwin with normal clock */
3309 state_lo = BWI_STATE_LO_CLOCK |
3310 __SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
3311 CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
3313 /* Flush pending bus write */
3314 CSR_READ_4(sc, BWI_STATE_LO);
/*
 * Program the BSSID: install it in the MAC's address filter and write
 * the combined {myaddr, bssid} buffer into the template memory at
 * offset 0x20, 32 bits at a time, assembled LSB-first from the byte
 * stream.
 */
3319 bwi_set_bssid(struct bwi_softc *sc, const uint8_t *bssid)
3321 struct ieee80211com *ic = &sc->sc_ic;
3322 struct bwi_mac *mac;
3323 struct bwi_myaddr_bssid buf;
/* The current regwin must be the MAC core to touch template memory. */
3328 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
3329 mac = (struct bwi_mac *)sc->sc_cur_regwin;
3331 bwi_set_addr_filter(sc, BWI_ADDR_FILTER_BSSID, bssid);
3333 bcopy(ic->ic_myaddr, buf.myaddr, sizeof(buf.myaddr));
3334 bcopy(bssid, buf.bssid, sizeof(buf.bssid));
3336 n = sizeof(buf) / sizeof(val);
3337 p = (const uint8_t *)&buf;
3338 for (i = 0; i < n; ++i) {
/* Assemble a little-endian 32-bit word from 4 consecutive bytes. */
3342 for (j = 0; j < sizeof(val); ++j)
3343 val |= ((uint32_t)(*p++)) << (j * 8);
3345 TMPLT_WRITE_4(mac, 0x20 + (i * sizeof(val)), val);
/*
 * net80211 callback: push the current short/long slot-time setting
 * down to the MAC.  No-op unless the interface is running; must be
 * called with the interface serializer held.
 */
3350 bwi_updateslot(struct ifnet *ifp)
3352 struct bwi_softc *sc = ifp->if_softc;
3353 struct ieee80211com *ic = &sc->sc_ic;
3354 struct bwi_mac *mac;
3356 if ((ifp->if_flags & IFF_RUNNING) == 0)
3359 ASSERT_SERIALIZED(ifp->if_serializer);
3361 DPRINTF(sc, "%s\n", __func__);
3363 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
3364 mac = (struct bwi_mac *)sc->sc_cur_regwin;
/* Non-zero second argument selects the short slot time. */
3366 bwi_mac_updateslot(mac, (ic->ic_flags & IEEE80211_F_SHSLOT));
/*
 * Periodic callout: recalibrate TX power while associated (RUN state),
 * then re-arm itself.  Skips the calibration itself in monitor mode.
 * Runs under the interface serializer since callouts are not
 * serialized by default.
 */
3370 bwi_calibrate(void *xsc)
3372 struct bwi_softc *sc = xsc;
3373 struct ieee80211com *ic = &sc->sc_ic;
3374 struct ifnet *ifp = &ic->ic_if;
3376 lwkt_serialize_enter(ifp->if_serializer);
3378 if (ic->ic_state == IEEE80211_S_RUN) {
3379 struct bwi_mac *mac;
3381 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
3382 mac = (struct bwi_mac *)sc->sc_cur_regwin;
3384 if (ic->ic_opmode != IEEE80211_M_MONITOR)
3385 bwi_mac_calibrate_txpower(mac);
3387 /* XXX 15 seconds */
3388 callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc);
3391 lwkt_serialize_exit(ifp->if_serializer);
3395 bwi_calc_rssi(struct bwi_softc *sc, const struct bwi_rxbuf_hdr *hdr)
3397 struct bwi_mac *mac;
3399 KKASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC);
3400 mac = (struct bwi_mac *)sc->sc_cur_regwin;
3402 return bwi_rf_calc_rssi(mac, hdr);