| 1 | /* |
| 2 | * Copyright (c) 1997, 1998 |
| 3 | * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
| 8 | * 1. Redistributions of source code must retain the above copyright |
| 9 | * notice, this list of conditions and the following disclaimer. |
| 10 | * 2. Redistributions in binary form must reproduce the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer in the |
| 12 | * documentation and/or other materials provided with the distribution. |
| 13 | * 3. All advertising materials mentioning features or use of this software |
| 14 | * must display the following acknowledgement: |
| 15 | * This product includes software developed by Bill Paul. |
| 16 | * 4. Neither the name of the author nor the names of any co-contributors |
| 17 | * may be used to endorse or promote products derived from this software |
| 18 | * without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND |
| 21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 23 | * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD |
| 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
| 30 | * THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | * $FreeBSD: src/sys/pci/if_tl.c,v 1.51.2.5 2001/12/16 15:46:08 luigi Exp $ |
| 33 | */ |
| 34 | |
| 35 | /* |
| 36 | * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. |
| 37 | * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, |
| 38 | * the National Semiconductor DP83840A physical interface and the |
| 39 | * Microchip Technology 24Cxx series serial EEPROM. |
| 40 | * |
| 41 | * Written using the following four documents: |
| 42 | * |
| 43 | * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) |
| 44 | * National Semiconductor DP83840A data sheet (www.national.com) |
| 45 | * Microchip Technology 24C02C data sheet (www.microchip.com) |
| 46 | * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) |
| 47 | * |
| 48 | * Written by Bill Paul <wpaul@ctr.columbia.edu> |
| 49 | * Electrical Engineering Department |
| 50 | * Columbia University, New York City |
| 51 | */ |
| 52 | |
| 53 | /* |
| 54 | * Some notes about the ThunderLAN: |
| 55 | * |
| 56 | * The ThunderLAN controller is a single chip containing PCI controller |
| 57 | * logic, approximately 3K of on-board SRAM, a LAN controller, and media |
| 58 | * independent interface (MII) bus. The MII allows the ThunderLAN chip to |
| 59 | * control up to 32 different physical interfaces (PHYs). The ThunderLAN |
| 60 | * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller |
| 61 | * to act as a complete ethernet interface. |
| 62 | * |
| 63 | * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards |
| 64 | * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec |
| 65 | * in full or half duplex. Some of the Compaq Deskpro machines use a |
| 66 | * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters |
| 67 | * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in |
| 68 | * concert with the ThunderLAN's internal PHY to provide full 10/100 |
| 69 | * support. This is cheaper than using a standalone external PHY for both |
| 70 | * 10/100 modes and letting the ThunderLAN's internal PHY go to waste. |
| 71 | * A serial EEPROM is also attached to the ThunderLAN chip to provide |
| 72 | * power-up default register settings and for storing the adapter's |
| 73 | * station address. Although not supported by this driver, the ThunderLAN |
| 74 | * chip can also be connected to token ring PHYs. |
| 75 | * |
| 76 | * The ThunderLAN has a set of registers which can be used to issue |
| 77 | * commands, acknowledge interrupts, and to manipulate other internal |
| 78 | * registers on its DIO bus. The primary registers can be accessed |
| 79 | * using either programmed I/O (inb/outb) or via PCI memory mapping, |
| 80 | * depending on how the card is configured during the PCI probing |
| 81 | * phase. It is even possible to have both PIO and memory mapped |
| 82 | * access turned on at the same time. |
| 83 | * |
| 84 | * Frame reception and transmission with the ThunderLAN chip is done |
| 85 | * using frame 'lists.' A list structure looks more or less like this: |
| 86 | * |
| 87 | * struct tl_frag { |
| 88 | * u_int32_t fragment_address; |
| 89 | * u_int32_t fragment_size; |
| 90 | * }; |
| 91 | * struct tl_list { |
| 92 | * u_int32_t forward_pointer; |
| 93 | * u_int16_t cstat; |
| 94 | * u_int16_t frame_size; |
| 95 | * struct tl_frag fragments[10]; |
| 96 | * }; |
| 97 | * |
| 98 | * The forward pointer in the list header can be either a 0 or the address |
| 99 | * of another list, which allows several lists to be linked together. Each |
| 100 | * list contains up to 10 fragment descriptors. This means the chip allows |
| 101 | * ethernet frames to be broken up into up to 10 chunks for transfer to |
| 102 | * and from the SRAM. Note that the forward pointer and fragment buffer |
| 103 | * addresses are physical memory addresses, not virtual. Note also that |
| 104 | * a single ethernet frame can not span lists: if the host wants to |
| 105 | * transmit a frame and the frame data is split up over more than 10 |
 * buffers, the frame has to be collapsed before it can be transmitted.
| 107 | * |
| 108 | * To receive frames, the driver sets up a number of lists and populates |
| 109 | * the fragment descriptors, then it sends an RX GO command to the chip. |
| 110 | * When a frame is received, the chip will DMA it into the memory regions |
| 111 | * specified by the fragment descriptors and then trigger an RX 'end of |
| 112 | * frame interrupt' when done. The driver may choose to use only one |
 * fragment per list; this may result in slightly less efficient use
| 114 | * of memory in exchange for improving performance. |
| 115 | * |
| 116 | * To transmit frames, the driver again sets up lists and fragment |
| 117 | * descriptors, only this time the buffers contain frame data that |
| 118 | * is to be DMA'ed into the chip instead of out of it. Once the chip |
 * has transferred the data into its on-board SRAM, it will trigger a
| 120 | * TX 'end of frame' interrupt. It will also generate an 'end of channel' |
| 121 | * interrupt when it reaches the end of the list. |
| 122 | */ |
| 123 | |
| 124 | /* |
| 125 | * Some notes about this driver: |
| 126 | * |
| 127 | * The ThunderLAN chip provides a couple of different ways to organize |
| 128 | * reception, transmission and interrupt handling. The simplest approach |
| 129 | * is to use one list each for transmission and reception. In this mode, |
| 130 | * the ThunderLAN will generate two interrupts for every received frame |
| 131 | * (one RX EOF and one RX EOC) and two for each transmitted frame (one |
| 132 | * TX EOF and one TX EOC). This may make the driver simpler but it hurts |
| 133 | * performance to have to handle so many interrupts. |
| 134 | * |
| 135 | * Initially I wanted to create a circular list of receive buffers so |
| 136 | * that the ThunderLAN chip would think there was an infinitely long |
| 137 | * receive channel and never deliver an RXEOC interrupt. However this |
| 138 | * doesn't work correctly under heavy load: while the manual says the |
| 139 | * chip will trigger an RXEOF interrupt each time a frame is copied into |
| 140 | * memory, you can't count on the chip waiting around for you to acknowledge |
| 141 | * the interrupt before it starts trying to DMA the next frame. The result |
| 142 | * is that the chip might traverse the entire circular list and then wrap |
| 143 | * around before you have a chance to do anything about it. Consequently, |
| 144 | * the receive list is terminated (with a 0 in the forward pointer in the |
| 145 | * last element). Each time an RXEOF interrupt arrives, the used list |
| 146 | * is shifted to the end of the list. This gives the appearance of an |
| 147 | * infinitely large RX chain so long as the driver doesn't fall behind |
| 148 | * the chip and allow all of the lists to be filled up. |
| 149 | * |
| 150 | * If all the lists are filled, the adapter will deliver an RX 'end of |
| 151 | * channel' interrupt when it hits the 0 forward pointer at the end of |
| 152 | * the chain. The RXEOC handler then cleans out the RX chain and resets |
| 153 | * the list head pointer in the ch_parm register and restarts the receiver. |
| 154 | * |
| 155 | * For frame transmission, it is possible to program the ThunderLAN's |
| 156 | * transmit interrupt threshold so that the chip can acknowledge multiple |
| 157 | * lists with only a single TX EOF interrupt. This allows the driver to |
| 158 | * queue several frames in one shot, and only have to handle a total |
| 159 | * two interrupts (one TX EOF and one TX EOC) no matter how many frames |
| 160 | * are transmitted. Frame transmission is done directly out of the |
| 161 | * mbufs passed to the tl_start() routine via the interface send queue. |
| 162 | * The driver simply sets up the fragment descriptors in the transmit |
| 163 | * lists to point to the mbuf data regions and sends a TX GO command. |
| 164 | * |
| 165 | * Note that since the RX and TX lists themselves are always used |
 * only by the driver, they are malloc()ed once at driver initialization
| 167 | * time and never free()ed. |
| 168 | * |
| 169 | * Also, in order to remain as platform independent as possible, this |
| 170 | * driver uses memory mapped register access to manipulate the card |
| 171 | * as opposed to programmed I/O. This avoids the use of the inb/outb |
| 172 | * (and related) instructions which are specific to the i386 platform. |
| 173 | * |
| 174 | * Using these techniques, this driver achieves very high performance |
| 175 | * by minimizing the amount of interrupts generated during large |
| 176 | * transfers and by completely avoiding buffer copies. Frame transfer |
| 177 | * to and from the ThunderLAN chip is performed entirely by the chip |
| 178 | * itself thereby reducing the load on the host CPU. |
| 179 | */ |
| 180 | |
| 181 | #include <sys/param.h> |
| 182 | #include <sys/systm.h> |
| 183 | #include <sys/sockio.h> |
| 184 | #include <sys/mbuf.h> |
| 185 | #include <sys/malloc.h> |
| 186 | #include <sys/kernel.h> |
| 187 | #include <sys/socket.h> |
| 188 | #include <sys/serialize.h> |
| 189 | #include <sys/bus.h> |
| 190 | #include <sys/rman.h> |
| 191 | #include <sys/thread2.h> |
| 192 | #include <sys/interrupt.h> |
| 193 | |
| 194 | #include <net/if.h> |
| 195 | #include <net/ifq_var.h> |
| 196 | #include <net/if_arp.h> |
| 197 | #include <net/ethernet.h> |
| 198 | #include <net/if_dl.h> |
| 199 | #include <net/if_media.h> |
| 200 | |
| 201 | #include <net/bpf.h> |
| 202 | |
| 203 | #include <vm/vm.h> /* for vtophys */ |
| 204 | #include <vm/pmap.h> /* for vtophys */ |
| 205 | |
| 206 | #include "../mii_layer/mii.h" |
| 207 | #include "../mii_layer/miivar.h" |
| 208 | |
| 209 | #include <bus/pci/pcireg.h> |
| 210 | #include <bus/pci/pcivar.h> |
| 211 | |
| 212 | /* |
| 213 | * Default to using PIO register access mode to pacify certain |
| 214 | * laptop docking stations with built-in ThunderLAN chips that |
| 215 | * don't seem to handle memory mapped mode properly. |
| 216 | */ |
| 217 | #define TL_USEIOSPACE |
| 218 | |
| 219 | #include "if_tlreg.h" |
| 220 | |
| 221 | /* "controller miibus0" required. See GENERIC if you get errors here. */ |
| 222 | #include "miibus_if.h" |
| 223 | |
| 224 | /* |
| 225 | * Various supported device vendors/types and their names. |
| 226 | */ |
| 227 | |
static struct tl_type tl_devs[] = {
	/* { PCI vendor ID, PCI device ID, probe description string } */
	{ TI_VENDORID, TI_DEVICEID_THUNDERLAN,
		"Texas Instruments ThunderLAN" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
		"Compaq Netelligent 10" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
		"Compaq Netelligent 10/100" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
		"Compaq Netelligent 10/100 Proliant" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
		"Compaq Netelligent 10/100 Dual Port" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
		"Compaq NetFlex-3/P Integrated" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
		"Compaq NetFlex-3/P" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
		"Compaq NetFlex 3/P w/ BNC" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
		"Compaq Netelligent 10/100 TX Embedded UTP" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
		"Compaq Netelligent 10/100 TX UTP" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
		"Olicom OC-2183/2185" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
		"Olicom OC-2325" },
	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
		"Olicom OC-2326 10/100 TX UTP" },
	{ 0, 0, NULL }		/* end-of-table sentinel */
};
| 259 | |
/* Device interface (newbus probe/attach/detach). */
static int tl_probe		(device_t);
static int tl_attach		(device_t);
static int tl_detach		(device_t);

/* Interrupt sub-vectors, dispatched from tl_intr(). */
static int tl_intvec_rxeoc	(void *, u_int32_t);
static int tl_intvec_txeoc	(void *, u_int32_t);
static int tl_intvec_txeof	(void *, u_int32_t);
static int tl_intvec_rxeof	(void *, u_int32_t);
static int tl_intvec_adchk	(void *, u_int32_t);
static int tl_intvec_netsts	(void *, u_int32_t);

/* RX buffer replenish, statistics, and TX fragment setup. */
static int tl_newbuf		(struct tl_softc *,
					struct tl_chain_onefrag *);
static void tl_stats_update	(void *);
static void tl_stats_update_serialized(void *);
static int tl_encap		(struct tl_softc *, struct tl_chain *,
					struct mbuf *);

/* ifnet entry points. */
static void tl_intr		(void *);
static void tl_start		(struct ifnet *);
static int tl_ioctl		(struct ifnet *, u_long, caddr_t,
					struct ucred *);
static void tl_init		(void *);
static void tl_stop		(struct tl_softc *);
static void tl_watchdog		(struct ifnet *);
static void tl_shutdown		(device_t);
static int tl_ifmedia_upd	(struct ifnet *);
static void tl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

/* Serial EEPROM access (bit-banged via NETSIO). */
static u_int8_t tl_eeprom_putbyte	(struct tl_softc *, int);
static u_int8_t	tl_eeprom_getbyte	(struct tl_softc *,
					int, u_int8_t *);
static int tl_read_eeprom	(struct tl_softc *, caddr_t, int, int);

/* MII management interface (bit-banged) and miibus glue. */
static void tl_mii_sync		(struct tl_softc *);
static void tl_mii_send		(struct tl_softc *, u_int32_t, int);
static int tl_mii_readreg	(struct tl_softc *, struct tl_mii_frame *);
static int tl_mii_writereg	(struct tl_softc *, struct tl_mii_frame *);
static int tl_miibus_readreg	(device_t, int, int);
static int tl_miibus_writereg	(device_t, int, int, int);
static void tl_miibus_statchg	(device_t);

/* Media, filtering, reset, and descriptor-list initialization. */
static void tl_setmode		(struct tl_softc *, int);
static int tl_calchash		(caddr_t);
static void tl_setmulti		(struct tl_softc *);
static void tl_setfilt		(struct tl_softc *, caddr_t, int);
static void tl_softreset	(struct tl_softc *, int);
static void tl_hardreset	(device_t);
static int tl_list_rx_init	(struct tl_softc *);
static int tl_list_tx_init	(struct tl_softc *);

/* DIO (internal register window) accessors. */
static u_int8_t tl_dio_read8	(struct tl_softc *, int);
static u_int16_t tl_dio_read16	(struct tl_softc *, int);
static u_int32_t tl_dio_read32	(struct tl_softc *, int);
static void tl_dio_write8	(struct tl_softc *, int, int);
static void tl_dio_write16	(struct tl_softc *, int, int);
static void tl_dio_write32	(struct tl_softc *, int, int);
static void tl_dio_setbit	(struct tl_softc *, int, int);
static void tl_dio_clrbit	(struct tl_softc *, int, int);
static void tl_dio_setbit16	(struct tl_softc *, int, int);
static void tl_dio_clrbit16	(struct tl_softc *, int, int);

/* Select PIO vs. memory-mapped register access (see TL_USEIOSPACE above). */
#ifdef TL_USEIOSPACE
#define TL_RES		SYS_RES_IOPORT
#define TL_RID		TL_PCI_LOIO
#else
#define TL_RES		SYS_RES_MEMORY
#define TL_RID		TL_PCI_LOMEM
#endif
| 328 | |
/* Method dispatch table hooking this driver into newbus and the MII layer. */
static device_method_t tl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tl_probe),
	DEVMETHOD(device_attach,	tl_attach),
	DEVMETHOD(device_detach,	tl_detach),
	DEVMETHOD(device_shutdown,	tl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),

	{ 0, 0 }	/* terminator */
};
| 347 | |
static driver_t tl_driver = {
	"tl",			/* device name prefix ("tl0", "tl1", ...) */
	tl_methods,
	sizeof(struct tl_softc)
};

static devclass_t tl_devclass;

/* Register the driver on the PCI bus and attach a miibus child for PHYs. */
DECLARE_DUMMY_MODULE(if_tl);
DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, NULL, NULL);
DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, NULL, NULL);
| 359 | |
| 360 | static u_int8_t |
| 361 | tl_dio_read8(struct tl_softc *sc, int reg) |
| 362 | { |
| 363 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 364 | return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); |
| 365 | } |
| 366 | |
| 367 | static u_int16_t |
| 368 | tl_dio_read16(struct tl_softc *sc, int reg) |
| 369 | { |
| 370 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 371 | return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); |
| 372 | } |
| 373 | |
| 374 | static u_int32_t |
| 375 | tl_dio_read32(struct tl_softc *sc, int reg) |
| 376 | { |
| 377 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 378 | return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); |
| 379 | } |
| 380 | |
| 381 | static void |
| 382 | tl_dio_write8(struct tl_softc *sc, int reg, int val) |
| 383 | { |
| 384 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 385 | CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); |
| 386 | return; |
| 387 | } |
| 388 | |
| 389 | static void |
| 390 | tl_dio_write16(struct tl_softc *sc, int reg, int val) |
| 391 | { |
| 392 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 393 | CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); |
| 394 | return; |
| 395 | } |
| 396 | |
| 397 | static void |
| 398 | tl_dio_write32(struct tl_softc *sc, int reg, int val) |
| 399 | { |
| 400 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 401 | CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); |
| 402 | return; |
| 403 | } |
| 404 | |
| 405 | static void |
| 406 | tl_dio_setbit(struct tl_softc *sc, int reg, int bit) |
| 407 | { |
| 408 | u_int8_t f; |
| 409 | |
| 410 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 411 | f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); |
| 412 | f |= bit; |
| 413 | CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); |
| 414 | |
| 415 | return; |
| 416 | } |
| 417 | |
| 418 | static void |
| 419 | tl_dio_clrbit(struct tl_softc *sc, int reg, int bit) |
| 420 | { |
| 421 | u_int8_t f; |
| 422 | |
| 423 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 424 | f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); |
| 425 | f &= ~bit; |
| 426 | CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); |
| 427 | |
| 428 | return; |
| 429 | } |
| 430 | |
| 431 | static void |
| 432 | tl_dio_setbit16(struct tl_softc *sc, int reg, int bit) |
| 433 | { |
| 434 | u_int16_t f; |
| 435 | |
| 436 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 437 | f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); |
| 438 | f |= bit; |
| 439 | CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); |
| 440 | |
| 441 | return; |
| 442 | } |
| 443 | |
| 444 | static void |
| 445 | tl_dio_clrbit16(struct tl_softc *sc, int reg, int bit) |
| 446 | { |
| 447 | u_int16_t f; |
| 448 | |
| 449 | CSR_WRITE_2(sc, TL_DIO_ADDR, reg); |
| 450 | f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); |
| 451 | f &= ~bit; |
| 452 | CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); |
| 453 | |
| 454 | return; |
| 455 | } |
| 456 | |
/*
 * Send an instruction or address byte to the EEPROM, MSB first, by
 * bit-banging the NETSIO serial lines, then sample the ACK bit.
 * Returns zero if the EEPROM acknowledged the byte, non-zero otherwise
 * (EDATA is pulled low by the device during the ACK clock).
 */
static u_int8_t
tl_eeprom_putbyte(struct tl_softc *sc, int byte)
{
	int i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
		} else {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
		}
		DELAY(1);
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
		DELAY(1);
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	}

	/*
	 * Turn off TX mode so the EEPROM can drive EDATA for the ACK.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);

	/*
	 * Check for ack.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);

	return(ack);
}
| 499 | |
| 500 | /* |
| 501 | * Read a byte of data stored in the EEPROM at address 'addr.' |
| 502 | */ |
| 503 | static u_int8_t |
| 504 | tl_eeprom_getbyte(struct tl_softc *sc, int addr, u_int8_t *dest) |
| 505 | { |
| 506 | int i; |
| 507 | u_int8_t byte = 0; |
| 508 | |
| 509 | tl_dio_write8(sc, TL_NETSIO, 0); |
| 510 | |
| 511 | EEPROM_START; |
| 512 | |
| 513 | /* |
| 514 | * Send write control code to EEPROM. |
| 515 | */ |
| 516 | if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { |
| 517 | if_printf(&sc->arpcom.ac_if, "failed to send write command, " |
| 518 | "status: %x\n", tl_dio_read8(sc, TL_NETSIO)); |
| 519 | return(1); |
| 520 | } |
| 521 | |
| 522 | /* |
| 523 | * Send address of byte we want to read. |
| 524 | */ |
| 525 | if (tl_eeprom_putbyte(sc, addr)) { |
| 526 | if_printf(&sc->arpcom.ac_if, "failed to send address, " |
| 527 | "status: %x\n", tl_dio_read8(sc, TL_NETSIO)); |
| 528 | return(1); |
| 529 | } |
| 530 | |
| 531 | EEPROM_STOP; |
| 532 | EEPROM_START; |
| 533 | /* |
| 534 | * Send read control code to EEPROM. |
| 535 | */ |
| 536 | if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { |
| 537 | if_printf(&sc->arpcom.ac_if, "failed to send write command, " |
| 538 | "status: %x\n", tl_dio_read8(sc, TL_NETSIO)); |
| 539 | return(1); |
| 540 | } |
| 541 | |
| 542 | /* |
| 543 | * Start reading bits from EEPROM. |
| 544 | */ |
| 545 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); |
| 546 | for (i = 0x80; i; i >>= 1) { |
| 547 | tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); |
| 548 | DELAY(1); |
| 549 | if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) |
| 550 | byte |= i; |
| 551 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); |
| 552 | DELAY(1); |
| 553 | } |
| 554 | |
| 555 | EEPROM_STOP; |
| 556 | |
| 557 | /* |
| 558 | * No ACK generated for read, so just return byte. |
| 559 | */ |
| 560 | |
| 561 | *dest = byte; |
| 562 | |
| 563 | return(0); |
| 564 | } |
| 565 | |
| 566 | /* |
| 567 | * Read a sequence of bytes from the EEPROM. |
| 568 | */ |
| 569 | static int |
| 570 | tl_read_eeprom(struct tl_softc *sc, caddr_t dest, int off, int cnt) |
| 571 | { |
| 572 | int err = 0, i; |
| 573 | u_int8_t byte = 0; |
| 574 | |
| 575 | for (i = 0; i < cnt; i++) { |
| 576 | err = tl_eeprom_getbyte(sc, off + i, &byte); |
| 577 | if (err) |
| 578 | break; |
| 579 | *(dest + i) = byte; |
| 580 | } |
| 581 | |
| 582 | return(err ? 1 : 0); |
| 583 | } |
| 584 | |
| 585 | static void |
| 586 | tl_mii_sync(struct tl_softc *sc) |
| 587 | { |
| 588 | int i; |
| 589 | |
| 590 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); |
| 591 | |
| 592 | for (i = 0; i < 32; i++) { |
| 593 | tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); |
| 594 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); |
| 595 | } |
| 596 | |
| 597 | return; |
| 598 | } |
| 599 | |
| 600 | static void |
| 601 | tl_mii_send(struct tl_softc *sc, u_int32_t bits, int cnt) |
| 602 | { |
| 603 | int i; |
| 604 | |
| 605 | for (i = (0x1 << (cnt - 1)); i; i >>= 1) { |
| 606 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); |
| 607 | if (bits & i) { |
| 608 | tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); |
| 609 | } else { |
| 610 | tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); |
| 611 | } |
| 612 | tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); |
| 613 | } |
| 614 | } |
| 615 | |
/*
 * Bit-bang an MII management read frame described by 'frame'
 * (mii_phyaddr/mii_regaddr filled in by the caller).  On success the
 * 16-bit register value is left in frame->mii_data and 0 is returned;
 * if the PHY does not ACK, 1 is returned and mii_data stays 0.
 */
static int
tl_mii_readreg(struct tl_softc *sc, struct tl_mii_frame *frame)
{
	int i, ack;
	int minten = 0;

	tl_mii_sync(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;
	
	/*
	 * Turn off MII interrupt by forcing MINTEN low; remember its
	 * previous state so it can be restored on exit.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	/*
	 * Turn on data xmit.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit so the PHY can drive MDATA for the turnaround.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Idle bit */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Check for ack: PHY pulls MDATA low to acknowledge. */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;

	/* Complete the cycle */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (!ack) {
			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
				frame->mii_data |= i;
		}
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}

fail:

	/* One final clock pulse to leave the bus idle. */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Reenable interrupts */
	if (minten) {
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	if (ack)
		return(1);
	return(0);
}
| 704 | |
/*
 * Bit-bang an MII management write frame described by 'frame'
 * (mii_phyaddr/mii_regaddr/mii_data filled in by the caller).
 * Always returns 0; writes are not acknowledged by the PHY.
 */
static int
tl_mii_writereg(struct tl_softc *sc, struct tl_mii_frame *frame)
{
	int minten;

	tl_mii_sync(sc);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_WRITEOP;
	frame->mii_turnaround = TL_MII_TURNAROUND;

	/*
	 * Turn off MII interrupt by forcing MINTEN low; remember its
	 * previous state so it can be restored on exit.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	/*
	 * Turn on data output.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Clock out the complete frame: preamble fields, then data. */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);
	tl_mii_send(sc, frame->mii_turnaround, 2);
	tl_mii_send(sc, frame->mii_data, 16);

	/* One final clock pulse to finish the cycle. */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Turn off xmit.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Reenable interrupts */
	if (minten)
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	return(0);
}
| 754 | |
| 755 | static int |
| 756 | tl_miibus_readreg(device_t dev, int phy, int reg) |
| 757 | { |
| 758 | struct tl_softc *sc; |
| 759 | struct tl_mii_frame frame; |
| 760 | |
| 761 | sc = device_get_softc(dev); |
| 762 | bzero((char *)&frame, sizeof(frame)); |
| 763 | |
| 764 | frame.mii_phyaddr = phy; |
| 765 | frame.mii_regaddr = reg; |
| 766 | tl_mii_readreg(sc, &frame); |
| 767 | |
| 768 | return(frame.mii_data); |
| 769 | } |
| 770 | |
| 771 | static int |
| 772 | tl_miibus_writereg(device_t dev, int phy, int reg, int data) |
| 773 | { |
| 774 | struct tl_softc *sc; |
| 775 | struct tl_mii_frame frame; |
| 776 | |
| 777 | sc = device_get_softc(dev); |
| 778 | bzero((char *)&frame, sizeof(frame)); |
| 779 | |
| 780 | frame.mii_phyaddr = phy; |
| 781 | frame.mii_regaddr = reg; |
| 782 | frame.mii_data = data; |
| 783 | |
| 784 | tl_mii_writereg(sc, &frame); |
| 785 | |
| 786 | return(0); |
| 787 | } |
| 788 | |
| 789 | static void |
| 790 | tl_miibus_statchg(device_t dev) |
| 791 | { |
| 792 | struct tl_softc *sc; |
| 793 | struct mii_data *mii; |
| 794 | |
| 795 | sc = device_get_softc(dev); |
| 796 | mii = device_get_softc(sc->tl_miibus); |
| 797 | |
| 798 | if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { |
| 799 | tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); |
| 800 | } else { |
| 801 | tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); |
| 802 | } |
| 803 | |
| 804 | return; |
| 805 | } |
| 806 | |
| 807 | /* |
| 808 | * Set modes for bitrate devices. |
| 809 | */ |
| 810 | static void |
| 811 | tl_setmode(struct tl_softc *sc, int media) |
| 812 | { |
| 813 | if (IFM_SUBTYPE(media) == IFM_10_5) |
| 814 | tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); |
| 815 | if (IFM_SUBTYPE(media) == IFM_10_T) { |
| 816 | tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); |
| 817 | if ((media & IFM_GMASK) == IFM_FDX) { |
| 818 | tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); |
| 819 | tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); |
| 820 | } else { |
| 821 | tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); |
| 822 | tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); |
| 823 | } |
| 824 | } |
| 825 | |
| 826 | return; |
| 827 | } |
| 828 | |
| 829 | /* |
| 830 | * Calculate the hash of a MAC address for programming the multicast hash |
| 831 | * table. This hash is simply the address split into 6-bit chunks |
| 832 | * XOR'd, e.g. |
| 833 | * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 |
| 834 | * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 |
| 835 | * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then |
| 836 | * the folded 24-bit value is split into 6-bit portions and XOR'd. |
| 837 | */ |
| 838 | static int |
| 839 | tl_calchash(caddr_t addr) |
| 840 | { |
| 841 | int t; |
| 842 | |
| 843 | t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | |
| 844 | (addr[2] ^ addr[5]); |
| 845 | return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; |
| 846 | } |
| 847 | |
| 848 | /* |
| 849 | * The ThunderLAN has a perfect MAC address filter in addition to |
| 850 | * the multicast hash filter. The perfect filter can be programmed |
| 851 | * with up to four MAC addresses. The first one is always used to |
| 852 | * hold the station address, which leaves us free to use the other |
| 853 | * three for multicast addresses. |
| 854 | */ |
| 855 | static void |
| 856 | tl_setfilt(struct tl_softc *sc, caddr_t addr, int slot) |
| 857 | { |
| 858 | int i; |
| 859 | u_int16_t regaddr; |
| 860 | |
| 861 | regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); |
| 862 | |
| 863 | for (i = 0; i < ETHER_ADDR_LEN; i++) |
| 864 | tl_dio_write8(sc, regaddr + i, *(addr + i)); |
| 865 | |
| 866 | return; |
| 867 | } |
| 868 | |
| 869 | /* |
| 870 | * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly |
| 871 | * linked list. This is fine, except addresses are added from the head |
| 872 | * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") |
| 873 | * group to always be in the perfect filter, but as more groups are added, |
| 874 | * the 224.0.0.1 entry (which is always added first) gets pushed down |
| 875 | * the list and ends up at the tail. So after 3 or 4 multicast groups |
| 876 | * are added, the all-hosts entry gets pushed out of the perfect filter |
| 877 | * and into the hash table. |
| 878 | * |
| 879 | * Because the multicast list is a doubly-linked list as opposed to a |
| 880 | * circular queue, we don't have the ability to just grab the tail of |
| 881 | * the list and traverse it backwards. Instead, we have to traverse |
| 882 | * the list once to find the tail, then traverse it again backwards to |
| 883 | * update the multicast filter. |
| 884 | */ |
| 885 | static void |
| 886 | tl_setmulti(struct tl_softc *sc) |
| 887 | { |
| 888 | struct ifnet *ifp; |
| 889 | u_int32_t hashes[2] = { 0, 0 }; |
| 890 | int h, i; |
| 891 | struct ifmultiaddr *ifma; |
| 892 | u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; |
| 893 | ifp = &sc->arpcom.ac_if; |
| 894 | |
| 895 | /* First, zot all the existing filters. */ |
| 896 | for (i = 1; i < 4; i++) |
| 897 | tl_setfilt(sc, (caddr_t)&dummy, i); |
| 898 | tl_dio_write32(sc, TL_HASH1, 0); |
| 899 | tl_dio_write32(sc, TL_HASH2, 0); |
| 900 | |
| 901 | /* Now program new ones. */ |
| 902 | if (ifp->if_flags & IFF_ALLMULTI) { |
| 903 | hashes[0] = 0xFFFFFFFF; |
| 904 | hashes[1] = 0xFFFFFFFF; |
| 905 | } else { |
| 906 | i = 1; |
| 907 | TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { |
| 908 | if (ifma->ifma_addr->sa_family != AF_LINK) |
| 909 | continue; |
| 910 | /* |
| 911 | * Program the first three multicast groups |
| 912 | * into the perfect filter. For all others, |
| 913 | * use the hash table. |
| 914 | */ |
| 915 | if (i < 4) { |
| 916 | tl_setfilt(sc, |
| 917 | LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); |
| 918 | i++; |
| 919 | continue; |
| 920 | } |
| 921 | |
| 922 | h = tl_calchash( |
| 923 | LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); |
| 924 | if (h < 32) |
| 925 | hashes[0] |= (1 << h); |
| 926 | else |
| 927 | hashes[1] |= (1 << (h - 32)); |
| 928 | } |
| 929 | } |
| 930 | |
| 931 | tl_dio_write32(sc, TL_HASH1, hashes[0]); |
| 932 | tl_dio_write32(sc, TL_HASH2, hashes[1]); |
| 933 | |
| 934 | return; |
| 935 | } |
| 936 | |
| 937 | /* |
| 938 | * This routine is recommended by the ThunderLAN manual to insure that |
| 939 | * the internal PHY is powered up correctly. It also recommends a one |
| 940 | * second pause at the end to 'wait for the clocks to start' but in my |
| 941 | * experience this isn't necessary. |
| 942 | */ |
| 943 | static void |
| 944 | tl_hardreset(device_t dev) |
| 945 | { |
| 946 | struct tl_softc *sc; |
| 947 | int i; |
| 948 | u_int16_t flags; |
| 949 | |
| 950 | sc = device_get_softc(dev); |
| 951 | |
| 952 | tl_mii_sync(sc); |
| 953 | |
| 954 | flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; |
| 955 | |
| 956 | for (i = 0; i < MII_NPHY; i++) |
| 957 | tl_miibus_writereg(dev, i, MII_BMCR, flags); |
| 958 | |
| 959 | tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); |
| 960 | DELAY(50000); |
| 961 | tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); |
| 962 | tl_mii_sync(sc); |
| 963 | while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); |
| 964 | |
| 965 | DELAY(50000); |
| 966 | return; |
| 967 | } |
| 968 | |
/*
 * Soft-reset the adapter and bring it to a known-quiet state:
 * interrupts off, statistics and address filters cleared, one
 * channel / one-fragment mode configured, TX threshold and IRQ
 * pacing timer loaded.  'internal' selects the internal PHY
 * (ignored for bitrate-only devices).
 */
static void
tl_softreset(struct tl_softc *sc, int internal)
{
	u_int32_t cmd, dummy, i;

	/* Assert the adapter reset bit. */
	CMD_SET(sc, TL_CMD_ADRST);

	/* Turn off interrupts */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * First, clear the stats registers.  'dummy' only sinks the
	 * values.  NOTE(review): five reads of the same DIO address
	 * look like they rely on the DIO address auto-advancing over
	 * the statistics block -- confirm against tl_dio_read32() and
	 * the ThunderLAN manual.
	 */
	for (i = 0; i < 5; i++)
		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);

	/* Clear Areg and Hash registers */
	for (i = 0; i < 8; i++)
		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	/* Enable the internal PHY only when asked for and not a bitrate card. */
	if (internal && !sc->tl_bitrate) {
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	} else {
		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	}

	/* Handle cards with bitrate devices. */
	if (sc->tl_bitrate)
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = CSR_READ_4(sc, TL_HOSTCMD);
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));

	/* Unreset the MII */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);

	/* Take the adapter out of reset */
	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);

	return;
}
| 1025 | |
| 1026 | /* |
| 1027 | * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs |
| 1028 | * against our list and return its name if we find a match. |
| 1029 | */ |
| 1030 | static int |
| 1031 | tl_probe(device_t dev) |
| 1032 | { |
| 1033 | struct tl_type *t; |
| 1034 | |
| 1035 | t = tl_devs; |
| 1036 | |
| 1037 | while(t->tl_name != NULL) { |
| 1038 | if ((pci_get_vendor(dev) == t->tl_vid) && |
| 1039 | (pci_get_device(dev) == t->tl_did)) { |
| 1040 | device_set_desc(dev, t->tl_name); |
| 1041 | return(0); |
| 1042 | } |
| 1043 | t++; |
| 1044 | } |
| 1045 | |
| 1046 | return(ENXIO); |
| 1047 | } |
| 1048 | |
/*
 * Attach routine: map the register window, allocate the interrupt
 * and DMA list memory, read the station address from the EEPROM,
 * reset the chip, probe for PHYs (falling back to bitrate mode if
 * none are found), and register the network interface.
 */
static int
tl_attach(device_t dev)
{
	int i;
	u_int16_t did, vid;
	struct tl_type *t;
	struct ifnet *ifp;
	struct tl_softc *sc;
	int error = 0, rid;
	uint8_t eaddr[ETHER_ADDR_LEN];

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sc = device_get_softc(dev);

	/* Re-find our device table entry; probe already matched one. */
	t = tl_devs;
	while(t->tl_name != NULL) {
		if (vid == t->tl_vid && did == t->tl_did)
			break;
		t++;
	}

	KKASSERT(t->tl_name != NULL);

	pci_enable_busmaster(dev);

#ifdef TL_USEIOSPACE
	rid = TL_PCI_LOIO;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);

	/*
	 * Some cards have the I/O and memory mapped address registers
	 * reversed. Try both combinations before giving up.
	 */
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOMEM;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
		    RF_ACTIVE);
	}
#else
	rid = TL_PCI_LOMEM;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	/* Same reversed-BAR workaround as the I/O-space case above. */
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOIO;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
	}
#endif

	/* Nothing allocated yet, so a plain return is safe here. */
	if (sc->tl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		return(error);
	}

	sc->tl_btag = rman_get_bustag(sc->tl_res);
	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);

#ifdef notdef
	/*
	 * The ThunderLAN manual suggests jacking the PCI latency
	 * timer all the way up to its maximum value. I'm not sure
	 * if this is really necessary, but what the manual wants,
	 * the manual gets.
	 */
	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
	command |= 0x0000FF00;
	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
#endif

	/* Allocate interrupt */
	rid = 0;
	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->tl_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Now allocate memory for the TX and RX lists.
	 * Must be physically contiguous and 32-bit addressable,
	 * since the chip follows physical forward pointers.
	 */
	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->tl_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	sc->tl_dinfo = t;
	/* The EEPROM offset of the station address differs by vendor. */
	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR;
	if (t->tl_vid == OLICOM_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;

	/* Reset the adapter. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Get station address from the EEPROM.
	 */
	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * XXX Olicom, in its desire to be different from the
	 * rest of the world, has done strange things with the
	 * encoding of the station address in the EEPROM. First
	 * of all, they store the address at offset 0xF8 rather
	 * than at 0x83 like the ThunderLAN manual suggests.
	 * Second, they store the address in three 16-bit words in
	 * network byte order, as opposed to storing it sequentially
	 * like all the other ThunderLAN cards. In order to get
	 * the station address in a form that matches what the Olicom
	 * diagnostic utility specifies, we have to byte-swap each
	 * word. To make things even more confusing, neither 00:00:28
	 * nor 00:00:24 appear in the IEEE OUI database.
	 */
	if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
			u_int16_t *p;
			p = (u_int16_t *)&eaddr[i];
			*p = ntohs(*p);
		}
	}

	/* Fill in the ifnet methods and parameters. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tl_ioctl;
	ifp->if_start = tl_start;
	ifp->if_watchdog = tl_watchdog;
	ifp->if_init = tl_init;
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, TL_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	callout_init(&sc->tl_stat_timer);

	/* Reset the adapter again. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	/*
	 * Do MII setup. If no PHYs are found, then this is a
	 * bitrate ThunderLAN chip that only supports 10baseT
	 * and AUI/BNC.
	 */
	if (mii_phy_probe(dev, &sc->tl_miibus,
	    tl_ifmedia_upd, tl_ifmedia_sts)) {
		struct ifmedia *ifm;
		sc->tl_bitrate = 1;
		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
		/* Reset again, this time setting bitrate mode. */
		tl_softreset(sc, 1);
		ifm = &sc->ifmedia;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		tl_ifmedia_upd(ifp);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->tl_irq, INTR_MPSAFE,
	    tl_intr, sc, &sc->tl_intrhand,
	    ifp->if_serializer);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->tl_irq);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return(0);

fail:
	/* tl_detach() checks each resource for NULL before freeing. */
	tl_detach(dev);
	return(error);
}
| 1251 | |
/*
 * Detach routine.  Also used as the error-unwind path from
 * tl_attach(), so every resource is checked for NULL before it is
 * released.  Always returns 0.
 */
static int
tl_detach(device_t dev)
{
	struct tl_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		/* Quiesce the chip and remove the handler under the serializer. */
		lwkt_serialize_enter(ifp->if_serializer);
		tl_stop(sc);
		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->tl_miibus)
		device_delete_child(dev, sc->tl_miibus);
	bus_generic_detach(dev);

	/* Free DMA lists, media state, interrupt, and register window. */
	if (sc->tl_ldata)
		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
	if (sc->tl_bitrate)
		ifmedia_removeall(&sc->ifmedia);
	if (sc->tl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
	if (sc->tl_res)
		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);

	return(0);
}
| 1282 | |
| 1283 | /* |
| 1284 | * Initialize the transmit lists. |
| 1285 | */ |
| 1286 | static int |
| 1287 | tl_list_tx_init(struct tl_softc *sc) |
| 1288 | { |
| 1289 | struct tl_chain_data *cd; |
| 1290 | struct tl_list_data *ld; |
| 1291 | int i; |
| 1292 | |
| 1293 | cd = &sc->tl_cdata; |
| 1294 | ld = sc->tl_ldata; |
| 1295 | for (i = 0; i < TL_TX_LIST_CNT; i++) { |
| 1296 | cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; |
| 1297 | if (i == (TL_TX_LIST_CNT - 1)) |
| 1298 | cd->tl_tx_chain[i].tl_next = NULL; |
| 1299 | else |
| 1300 | cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; |
| 1301 | } |
| 1302 | |
| 1303 | cd->tl_tx_free = &cd->tl_tx_chain[0]; |
| 1304 | cd->tl_tx_tail = cd->tl_tx_head = NULL; |
| 1305 | sc->tl_txeoc = 1; |
| 1306 | |
| 1307 | return(0); |
| 1308 | } |
| 1309 | |
| 1310 | /* |
| 1311 | * Initialize the RX lists and allocate mbufs for them. |
| 1312 | */ |
| 1313 | static int |
| 1314 | tl_list_rx_init(struct tl_softc *sc) |
| 1315 | { |
| 1316 | struct tl_chain_data *cd; |
| 1317 | struct tl_list_data *ld; |
| 1318 | int i; |
| 1319 | |
| 1320 | cd = &sc->tl_cdata; |
| 1321 | ld = sc->tl_ldata; |
| 1322 | |
| 1323 | for (i = 0; i < TL_RX_LIST_CNT; i++) { |
| 1324 | cd->tl_rx_chain[i].tl_ptr = |
| 1325 | (struct tl_list_onefrag *)&ld->tl_rx_list[i]; |
| 1326 | if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS) |
| 1327 | return(ENOBUFS); |
| 1328 | if (i == (TL_RX_LIST_CNT - 1)) { |
| 1329 | cd->tl_rx_chain[i].tl_next = NULL; |
| 1330 | ld->tl_rx_list[i].tlist_fptr = 0; |
| 1331 | } else { |
| 1332 | cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; |
| 1333 | ld->tl_rx_list[i].tlist_fptr = |
| 1334 | vtophys(&ld->tl_rx_list[i + 1]); |
| 1335 | } |
| 1336 | } |
| 1337 | |
| 1338 | cd->tl_rx_head = &cd->tl_rx_chain[0]; |
| 1339 | cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; |
| 1340 | |
| 1341 | return(0); |
| 1342 | } |
| 1343 | |
| 1344 | static int |
| 1345 | tl_newbuf(struct tl_softc *sc, struct tl_chain_onefrag *c) |
| 1346 | { |
| 1347 | struct mbuf *m_new; |
| 1348 | |
| 1349 | m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); |
| 1350 | if (m_new == NULL) |
| 1351 | return (ENOBUFS); |
| 1352 | |
| 1353 | c->tl_mbuf = m_new; |
| 1354 | c->tl_next = NULL; |
| 1355 | c->tl_ptr->tlist_frsize = MCLBYTES; |
| 1356 | c->tl_ptr->tlist_fptr = 0; |
| 1357 | c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t)); |
| 1358 | c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; |
| 1359 | c->tl_ptr->tlist_cstat = TL_CSTAT_READY; |
| 1360 | |
| 1361 | return(0); |
| 1362 | } |
| 1363 | |
| 1364 | /* |
| 1365 | * Interrupt handler for RX 'end of frame' condition (EOF). This |
| 1366 | * tells us that a full ethernet frame has been captured and we need |
| 1367 | * to handle it. |
| 1368 | * |
| 1369 | * Reception is done using 'lists' which consist of a header and a |
| 1370 | * series of 10 data count/data address pairs that point to buffers. |
| 1371 | * Initially you're supposed to create a list, populate it with pointers |
| 1372 | * to buffers, then load the physical address of the list into the |
| 1373 | * ch_parm register. The adapter is then supposed to DMA the received |
| 1374 | * frame into the buffers for you. |
| 1375 | * |
| 1376 | * To make things as fast as possible, we have the chip DMA directly |
| 1377 | * into mbufs. This saves us from having to do a buffer copy: we can |
| 1378 | * just hand the mbufs directly to ether_input(). Once the frame has |
| 1379 | * been sent on its way, the 'list' structure is assigned a new buffer |
 * and moved to the end of the RX chain. As long as we stay ahead of
| 1381 | * the chip, it will always think it has an endless receive channel. |
| 1382 | * |
| 1383 | * If we happen to fall behind and the chip manages to fill up all of |
| 1384 | * the buffers, it will generate an end of channel interrupt and wait |
| 1385 | * for us to empty the chain and restart the receiver. |
| 1386 | */ |
/*
 * RX end-of-frame handler: hand completed frames to the stack,
 * recycle their descriptors to the tail of the chain, and return
 * the number of frames processed (used to acknowledge the
 * interrupt).  See the block comment above for the overall scheme.
 */
static int
tl_intvec_rxeof(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	int r = 0, total_len = 0;
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct tl_chain_onefrag *cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first descriptor the chip has not finished. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		/*
		 * No replacement mbuf available: recycle the current
		 * one back to the chip and count a dropped frame.
		 */
		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Relink the refreshed descriptor at the tail of the chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions, so
		 * we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		ifp->if_input(ifp, m);
	}

	return(r);
}
| 1444 | |
| 1445 | /* |
| 1446 | * The RX-EOC condition hits when the ch_parm address hasn't been |
| 1447 | * initialized or the adapter reached a list with a forward pointer |
| 1448 | * of 0 (which indicates the end of the chain). In our case, this means |
| 1449 | * the card has hit the end of the receive buffer chain and we need to |
| 1450 | * empty out the buffers and shift the pointer back to the beginning again. |
| 1451 | */ |
/*
 * RX end-of-channel handler: drain whatever completed frames remain,
 * ack them, then rewind the chain pointers to the start of the ring,
 * reload the channel parameter register and restart the receiver.
 * Returns the ack value (with GO|RT so the caller's ack restarts RX).
 */
static int
tl_intvec_rxeoc(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	int r;
	struct tl_chain_data *cd;


	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Reset the software chain to the start of the ring. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	/* Point the chip at the head of the list and restart reception. */
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
| 1473 | |
| 1474 | static int |
| 1475 | tl_intvec_txeof(void *xsc, u_int32_t type) |
| 1476 | { |
| 1477 | struct tl_softc *sc; |
| 1478 | int r = 0; |
| 1479 | struct tl_chain *cur_tx; |
| 1480 | |
| 1481 | sc = xsc; |
| 1482 | |
| 1483 | /* |
| 1484 | * Go through our tx list and free mbufs for those |
| 1485 | * frames that have been sent. |
| 1486 | */ |
| 1487 | while (sc->tl_cdata.tl_tx_head != NULL) { |
| 1488 | cur_tx = sc->tl_cdata.tl_tx_head; |
| 1489 | if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) |
| 1490 | break; |
| 1491 | sc->tl_cdata.tl_tx_head = cur_tx->tl_next; |
| 1492 | |
| 1493 | r++; |
| 1494 | m_freem(cur_tx->tl_mbuf); |
| 1495 | cur_tx->tl_mbuf = NULL; |
| 1496 | |
| 1497 | cur_tx->tl_next = sc->tl_cdata.tl_tx_free; |
| 1498 | sc->tl_cdata.tl_tx_free = cur_tx; |
| 1499 | if (!cur_tx->tl_ptr->tlist_fptr) |
| 1500 | break; |
| 1501 | } |
| 1502 | |
| 1503 | return(r); |
| 1504 | } |
| 1505 | |
| 1506 | /* |
| 1507 | * The transmit end of channel interrupt. The adapter triggers this |
| 1508 | * interrupt to tell us it hit the end of the current transmit list. |
| 1509 | * |
| 1510 | * A note about this: it's possible for a condition to arise where |
| 1511 | * tl_start() may try to send frames between TXEOF and TXEOC interrupts. |
| 1512 | * You have to avoid this since the chip expects things to go in a |
| 1513 | * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC. |
| 1514 | * When the TXEOF handler is called, it will free all of the transmitted |
| 1515 | * frames and reset the tx_head pointer to NULL. However, a TXEOC |
| 1516 | * interrupt should be received and acknowledged before any more frames |
 * are queued for transmission. If tl_start() is called after TXEOF
| 1518 | * resets the tx_head pointer but _before_ the TXEOC interrupt arrives, |
| 1519 | * it could attempt to issue a transmit command prematurely. |
| 1520 | * |
| 1521 | * To guard against this, tl_start() will only issue transmit commands |
| 1522 | * if the tl_txeoc flag is set, and only the TXEOC interrupt handler |
| 1523 | * can set this flag once tl_start() has cleared it. |
| 1524 | */ |
/*
 * TX end-of-channel handler (see the block comment above for the
 * TXEOF/TXEOC ordering rules).  If no frames remain we mark the
 * transmitter idle and let tl_start() run again; otherwise we ack
 * here, reload the channel with the next list, and restart it.
 */
static int
tl_intvec_txeoc(void *xsc, u_int32_t type)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	u_int32_t cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing queued: transmitter is idle again. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		/* Already acked above -- tell the caller not to ack again. */
		return(0);
	}

	return(1);
}
| 1559 | |
| 1560 | static int |
| 1561 | tl_intvec_adchk(void *xsc, u_int32_t type) |
| 1562 | { |
| 1563 | struct tl_softc *sc; |
| 1564 | |
| 1565 | sc = xsc; |
| 1566 | |
| 1567 | if (type) { |
| 1568 | if_printf(&sc->arpcom.ac_if, "adapter check: %x\n", |
| 1569 | (unsigned int)CSR_READ_4(sc, TL_CH_PARM)); |
| 1570 | } |
| 1571 | |
| 1572 | tl_softreset(sc, 1); |
| 1573 | tl_stop(sc); |
| 1574 | tl_init(sc); |
| 1575 | CMD_SET(sc, TL_CMD_INTSON); |
| 1576 | |
| 1577 | return(0); |
| 1578 | } |
| 1579 | |
| 1580 | static int |
| 1581 | tl_intvec_netsts(void *xsc, u_int32_t type) |
| 1582 | { |
| 1583 | struct tl_softc *sc; |
| 1584 | u_int16_t netsts; |
| 1585 | |
| 1586 | sc = xsc; |
| 1587 | |
| 1588 | netsts = tl_dio_read16(sc, TL_NETSTS); |
| 1589 | tl_dio_write16(sc, TL_NETSTS, netsts); |
| 1590 | |
| 1591 | if_printf(&sc->arpcom.ac_if, "network status: %x\n", netsts); |
| 1592 | |
| 1593 | return(1); |
| 1594 | } |
| 1595 | |
/*
 * Main interrupt handler.  Reads and acknowledges the host interrupt
 * register, decodes the cause, dispatches to the matching vector
 * routine, then re-enables interrupts using the value the routine
 * returned as the ack count (0 means the routine acked itself).
 */
static void
tl_intr(void *xsc)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	int r = 0;
	u_int32_t type = 0;
	u_int16_t ints = 0;
	u_int8_t ivec = 0;

	sc = xsc;

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* Split the word: 'type' for the ack command, vector, and cause. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
#ifdef DIAGNOSTIC
		if_printf(ifp, "got an invalid interrupt!\n");
#endif
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof(sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc(sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		/* A statistics counter is about to overflow; harvest them. */
		tl_stats_update_serialized(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof(sc, type);
		break;
	case (TL_INTR_DUMMY):
		if_printf(ifp, "got a dummy interrupt\n");
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* The vector distinguishes adapter check from net status. */
		if (ivec)
			r = tl_intvec_adchk(sc, type);
		else
			r = tl_intvec_netsts(sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc(sc, type);
		break;
	default:
		if_printf(ifp, "bogus interrupt type\n");
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	/* Restart transmission if frames were queued while we were busy. */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
| 1665 | |
| 1666 | static |
| 1667 | void |
| 1668 | tl_stats_update(void *xsc) |
| 1669 | { |
| 1670 | struct tl_softc *sc = xsc; |
| 1671 | struct ifnet *ifp = &sc->arpcom.ac_if; |
| 1672 | |
| 1673 | lwkt_serialize_enter(ifp->if_serializer); |
| 1674 | tl_stats_update_serialized(xsc); |
| 1675 | lwkt_serialize_exit(ifp->if_serializer); |
| 1676 | } |
| 1677 | |
/*
 * Harvest the chip's statistics counters into the ifnet counters,
 * bump the TX threshold on underruns, rearm the one-second callout,
 * and tick the MII.  Caller must hold the interface serializer.
 */
static
void
tl_stats_update_serialized(void *xsc)
{
	struct tl_softc *sc;
	struct ifnet *ifp;
	struct tl_stats tl_stats;
	struct mii_data *mii;
	u_int32_t *p;

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/*
	 * TL_DIO_ADDR_INC makes the DIO address auto-advance, so five
	 * successive data reads step through the statistics registers.
	 */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	/* On TX underrun, raise the threshold (unless already whole-packet). */
	if (tl_tx_underrun(tl_stats)) {
		u_int8_t tx_thresh;
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			if_printf(ifp, "tx underrun -- increasing "
			    "tx threshold to %d bytes\n",
			    (64 * (tx_thresh * 4)));
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	/* Rearm for the next second; the callout re-enters via the wrapper. */
	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);

	/* Bitrate-only cards have no MII to tick. */
	if (!sc->tl_bitrate) {
		mii = device_get_softc(sc->tl_miibus);
		mii_tick(mii);
	}
}
| 1731 | |
| 1732 | /* |
| 1733 | * Encapsulate an mbuf chain in a list by coupling the mbuf data |
| 1734 | * pointers to the fragment pointers. |
| 1735 | */ |
| 1736 | static int |
| 1737 | tl_encap(struct tl_softc *sc, struct tl_chain *c, struct mbuf *m_head) |
| 1738 | { |
| 1739 | int frag = 0; |
| 1740 | struct tl_frag *f = NULL; |
| 1741 | int total_len; |
| 1742 | struct mbuf *m; |
| 1743 | |
| 1744 | /* |
| 1745 | * Start packing the mbufs in this chain into |
| 1746 | * the fragment pointers. Stop when we run out |
| 1747 | * of fragments or hit the end of the mbuf chain. |
| 1748 | */ |
| 1749 | total_len = 0; |
| 1750 | |
| 1751 | for (m = m_head, frag = 0; m != NULL; m = m->m_next) { |
| 1752 | if (m->m_len != 0) { |
| 1753 | if (frag == TL_MAXFRAGS) |
| 1754 | break; |
| 1755 | total_len+= m->m_len; |
| 1756 | c->tl_ptr->tl_frag[frag].tlist_dadr = |
| 1757 | vtophys(mtod(m, vm_offset_t)); |
| 1758 | c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len; |
| 1759 | frag++; |
| 1760 | } |
| 1761 | } |
| 1762 | |
| 1763 | /* |
| 1764 | * Handle special cases. |
| 1765 | * Special case #1: we used up all 10 fragments, but |
| 1766 | * we have more mbufs left in the chain. Copy the |
| 1767 | * data into an mbuf cluster. Note that we don't |
| 1768 | * bother clearing the values in the other fragment |
| 1769 | * pointers/counters; it wouldn't gain us anything, |
| 1770 | * and would waste cycles. |
| 1771 | */ |
| 1772 | if (m != NULL) { |
| 1773 | struct mbuf *m_new; |
| 1774 | |
| 1775 | m_new = m_getl(m_head->m_pkthdr.len, MB_DONTWAIT, MT_DATA, |
| 1776 | M_PKTHDR, NULL); |
| 1777 | if (m_new == NULL) { |
| 1778 | if_printf(&sc->arpcom.ac_if, "no memory for tx list\n"); |
| 1779 | return (1); |
| 1780 | } |
| 1781 | m_copydata(m_head, 0, m_head->m_pkthdr.len, |
| 1782 | mtod(m_new, caddr_t)); |
| 1783 | m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; |
| 1784 | m_freem(m_head); |
| 1785 | m_head = m_new; |
| 1786 | f = &c->tl_ptr->tl_frag[0]; |
| 1787 | f->tlist_dadr = vtophys(mtod(m_new, caddr_t)); |
| 1788 | f->tlist_dcnt = total_len = m_new->m_len; |
| 1789 | frag = 1; |
| 1790 | } |
| 1791 | |
| 1792 | /* |
| 1793 | * Special case #2: the frame is smaller than the minimum |
| 1794 | * frame size. We have to pad it to make the chip happy. |
| 1795 | */ |
| 1796 | if (total_len < TL_MIN_FRAMELEN) { |
| 1797 | if (frag == TL_MAXFRAGS) { |
| 1798 | if_printf(&sc->arpcom.ac_if, "all frags filled but " |
| 1799 | "frame still to small!\n"); |
| 1800 | } |
| 1801 | f = &c->tl_ptr->tl_frag[frag]; |
| 1802 | f->tlist_dcnt = TL_MIN_FRAMELEN - total_len; |
| 1803 | f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad); |
| 1804 | total_len += f->tlist_dcnt; |
| 1805 | frag++; |
| 1806 | } |
| 1807 | |
| 1808 | c->tl_mbuf = m_head; |
| 1809 | c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG; |
| 1810 | c->tl_ptr->tlist_frsize = total_len; |
| 1811 | c->tl_ptr->tlist_cstat = TL_CSTAT_READY; |
| 1812 | c->tl_ptr->tlist_fptr = 0; |
| 1813 | |
| 1814 | return(0); |
| 1815 | } |
| 1816 | |
| 1817 | /* |
| 1818 | * Main transmit routine. To avoid having to do mbuf copies, we put pointers |
| 1819 | * to the mbuf data regions directly in the transmit lists. We also save a |
| 1820 | * copy of the pointers since the transmit list fragment pointers are |
| 1821 | * physical addresses. |
| 1822 | */ |
| 1823 | static void |
| 1824 | tl_start(struct ifnet *ifp) |
| 1825 | { |
| 1826 | struct tl_softc *sc; |
| 1827 | struct mbuf *m_head = NULL; |
| 1828 | u_int32_t cmd; |
| 1829 | struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; |
| 1830 | |
| 1831 | sc = ifp->if_softc; |
| 1832 | |
| 1833 | /* |
| 1834 | * Check for an available queue slot. If there are none, |
| 1835 | * punt. |
| 1836 | */ |
| 1837 | if (sc->tl_cdata.tl_tx_free == NULL) { |
| 1838 | ifp->if_flags |= IFF_OACTIVE; |
| 1839 | return; |
| 1840 | } |
| 1841 | |
| 1842 | start_tx = sc->tl_cdata.tl_tx_free; |
| 1843 | |
| 1844 | while(sc->tl_cdata.tl_tx_free != NULL) { |
| 1845 | m_head = ifq_dequeue(&ifp->if_snd, NULL); |
| 1846 | if (m_head == NULL) |
| 1847 | break; |
| 1848 | |
| 1849 | /* Pick a chain member off the free list. */ |
| 1850 | cur_tx = sc->tl_cdata.tl_tx_free; |
| 1851 | sc->tl_cdata.tl_tx_free = cur_tx->tl_next; |
| 1852 | |
| 1853 | cur_tx->tl_next = NULL; |
| 1854 | |
| 1855 | /* Pack the data into the list. */ |
| 1856 | tl_encap(sc, cur_tx, m_head); |
| 1857 | |
| 1858 | /* Chain it together */ |
| 1859 | if (prev != NULL) { |
| 1860 | prev->tl_next = cur_tx; |
| 1861 | prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); |
| 1862 | } |
| 1863 | prev = cur_tx; |
| 1864 | |
| 1865 | BPF_MTAP(ifp, cur_tx->tl_mbuf); |
| 1866 | } |
| 1867 | |
| 1868 | /* |
| 1869 | * If there are no packets queued, bail. |
| 1870 | */ |
| 1871 | if (cur_tx == NULL) |
| 1872 | return; |
| 1873 | |
| 1874 | /* |
| 1875 | * That's all we can stands, we can't stands no more. |
| 1876 | * If there are no other transfers pending, then issue the |
| 1877 | * TX GO command to the adapter to start things moving. |
| 1878 | * Otherwise, just leave the data in the queue and let |
| 1879 | * the EOF/EOC interrupt handler send. |
| 1880 | */ |
| 1881 | if (sc->tl_cdata.tl_tx_head == NULL) { |
| 1882 | sc->tl_cdata.tl_tx_head = start_tx; |
| 1883 | sc->tl_cdata.tl_tx_tail = cur_tx; |
| 1884 | |
| 1885 | if (sc->tl_txeoc) { |
| 1886 | sc->tl_txeoc = 0; |
| 1887 | CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr)); |
| 1888 | cmd = CSR_READ_4(sc, TL_HOSTCMD); |
| 1889 | cmd &= ~TL_CMD_RT; |
| 1890 | cmd |= TL_CMD_GO|TL_CMD_INTSON; |
| 1891 | CMD_PUT(sc, cmd); |
| 1892 | } |
| 1893 | } else { |
| 1894 | sc->tl_cdata.tl_tx_tail->tl_next = start_tx; |
| 1895 | sc->tl_cdata.tl_tx_tail = cur_tx; |
| 1896 | } |
| 1897 | |
| 1898 | /* |
| 1899 | * Set a timeout in case the chip goes out to lunch. |
| 1900 | */ |
| 1901 | ifp->if_timer = 5; |
| 1902 | |
| 1903 | return; |
| 1904 | } |
| 1905 | |
/*
 * Initialize the hardware and bring the interface up: reset pending
 * I/O, program TX/RX parameters and address filters, set up the
 * RX/TX descriptor lists, start the receive channel and kick off the
 * one-second stats timer.  Also called from tl_watchdog() after a
 * soft reset.  The register write sequence below is order-dependent.
 */
static void
tl_init(void *xsc)
{
	struct tl_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	/* Max RX frame size; presumably bounded by the cluster-sized
	 * RX buffers set up in tl_list_rx_init() -- TODO confirm. */
	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		/* tl_stop() releases any partially allocated RX mbufs. */
		if_printf(ifp, "initialization failed: no "
			  "memory for rx buffers\n");
		tl_stop(sc);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));

	/* Kick the PHY only on MII-attached parts; tl_bitrate devices
	 * use the bitrate interface instead (see tl_ifmedia_upd()). */
	if (!sc->tl_bitrate) {
		if (sc->tl_miibus != NULL) {
			mii = device_get_softc(sc->tl_miibus);
			mii_mediachg(mii);
		}
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the stats update counter */
	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);
}
| 1983 | |
| 1984 | /* |
| 1985 | * Set media options. |
| 1986 | */ |
| 1987 | static int |
| 1988 | tl_ifmedia_upd(struct ifnet *ifp) |
| 1989 | { |
| 1990 | struct tl_softc *sc; |
| 1991 | struct mii_data *mii = NULL; |
| 1992 | |
| 1993 | sc = ifp->if_softc; |
| 1994 | |
| 1995 | if (sc->tl_bitrate) |
| 1996 | tl_setmode(sc, sc->ifmedia.ifm_media); |
| 1997 | else { |
| 1998 | mii = device_get_softc(sc->tl_miibus); |
| 1999 | mii_mediachg(mii); |
| 2000 | } |
| 2001 | |
| 2002 | return(0); |
| 2003 | } |
| 2004 | |
| 2005 | /* |
| 2006 | * Report current media status. |
| 2007 | */ |
| 2008 | static void |
| 2009 | tl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 2010 | { |
| 2011 | struct tl_softc *sc; |
| 2012 | struct mii_data *mii; |
| 2013 | |
| 2014 | sc = ifp->if_softc; |
| 2015 | |
| 2016 | ifmr->ifm_active = IFM_ETHER; |
| 2017 | |
| 2018 | if (sc->tl_bitrate) { |
| 2019 | if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1) |
| 2020 | ifmr->ifm_active = IFM_ETHER|IFM_10_5; |
| 2021 | else |
| 2022 | ifmr->ifm_active = IFM_ETHER|IFM_10_T; |
| 2023 | if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3) |
| 2024 | ifmr->ifm_active |= IFM_HDX; |
| 2025 | else |
| 2026 | ifmr->ifm_active |= IFM_FDX; |
| 2027 | return; |
| 2028 | } else { |
| 2029 | mii = device_get_softc(sc->tl_miibus); |
| 2030 | mii_pollstat(mii); |
| 2031 | ifmr->ifm_active = mii->mii_media_active; |
| 2032 | ifmr->ifm_status = mii->mii_media_status; |
| 2033 | } |
| 2034 | |
| 2035 | return; |
| 2036 | } |
| 2037 | |
| 2038 | static int |
| 2039 | tl_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) |
| 2040 | { |
| 2041 | struct tl_softc *sc = ifp->if_softc; |
| 2042 | struct ifreq *ifr = (struct ifreq *) data; |
| 2043 | int error = 0; |
| 2044 | |
| 2045 | switch(command) { |
| 2046 | case SIOCSIFFLAGS: |
| 2047 | if (ifp->if_flags & IFF_UP) { |
| 2048 | if (ifp->if_flags & IFF_RUNNING && |
| 2049 | ifp->if_flags & IFF_PROMISC && |
| 2050 | !(sc->tl_if_flags & IFF_PROMISC)) { |
| 2051 | tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); |
| 2052 | tl_setmulti(sc); |
| 2053 | } else if (ifp->if_flags & IFF_RUNNING && |
| 2054 | !(ifp->if_flags & IFF_PROMISC) && |
| 2055 | sc->tl_if_flags & IFF_PROMISC) { |
| 2056 | tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); |
| 2057 | tl_setmulti(sc); |
| 2058 | } else |
| 2059 | tl_init(sc); |
| 2060 | } else { |
| 2061 | if (ifp->if_flags & IFF_RUNNING) { |
| 2062 | tl_stop(sc); |
| 2063 | } |
| 2064 | } |
| 2065 | sc->tl_if_flags = ifp->if_flags; |
| 2066 | error = 0; |
| 2067 | break; |
| 2068 | case SIOCADDMULTI: |
| 2069 | case SIOCDELMULTI: |
| 2070 | tl_setmulti(sc); |
| 2071 | error = 0; |
| 2072 | break; |
| 2073 | case SIOCSIFMEDIA: |
| 2074 | case SIOCGIFMEDIA: |
| 2075 | if (sc->tl_bitrate) |
| 2076 | error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); |
| 2077 | else { |
| 2078 | struct mii_data *mii; |
| 2079 | mii = device_get_softc(sc->tl_miibus); |
| 2080 | error = ifmedia_ioctl(ifp, ifr, |
| 2081 | &mii->mii_media, command); |
| 2082 | } |
| 2083 | break; |
| 2084 | default: |
| 2085 | error = ether_ioctl(ifp, command, data); |
| 2086 | break; |
| 2087 | } |
| 2088 | return(error); |
| 2089 | } |
| 2090 | |
| 2091 | static void |
| 2092 | tl_watchdog(struct ifnet *ifp) |
| 2093 | { |
| 2094 | struct tl_softc *sc; |
| 2095 | |
| 2096 | sc = ifp->if_softc; |
| 2097 | |
| 2098 | if_printf(ifp, "device timeout\n"); |
| 2099 | |
| 2100 | ifp->if_oerrors++; |
| 2101 | |
| 2102 | tl_softreset(sc, 1); |
| 2103 | tl_init(sc); |
| 2104 | |
| 2105 | return; |
| 2106 | } |
| 2107 | |
| 2108 | /* |
| 2109 | * Stop the adapter and free any mbufs allocated to the |
| 2110 | * RX and TX lists. |
| 2111 | */ |
| 2112 | static void |
| 2113 | tl_stop(struct tl_softc *sc) |
| 2114 | { |
| 2115 | int i; |
| 2116 | struct ifnet *ifp; |
| 2117 | |
| 2118 | ifp = &sc->arpcom.ac_if; |
| 2119 | |
| 2120 | /* Stop the stats updater. */ |
| 2121 | callout_stop(&sc->tl_stat_timer); |
| 2122 | |
| 2123 | /* Stop the transmitter */ |
| 2124 | CMD_CLR(sc, TL_CMD_RT); |
| 2125 | CMD_SET(sc, TL_CMD_STOP); |
| 2126 | CSR_WRITE_4(sc, TL_CH_PARM, 0); |
| 2127 | |
| 2128 | /* Stop the receiver */ |
| 2129 | CMD_SET(sc, TL_CMD_RT); |
| 2130 | CMD_SET(sc, TL_CMD_STOP); |
| 2131 | CSR_WRITE_4(sc, TL_CH_PARM, 0); |
| 2132 | |
| 2133 | /* |
| 2134 | * Disable host interrupts. |
| 2135 | */ |
| 2136 | CMD_SET(sc, TL_CMD_INTSOFF); |
| 2137 | |
| 2138 | /* |
| 2139 | * Clear list pointer. |
| 2140 | */ |
| 2141 | CSR_WRITE_4(sc, TL_CH_PARM, 0); |
| 2142 | |
| 2143 | /* |
| 2144 | * Free the RX lists. |
| 2145 | */ |
| 2146 | for (i = 0; i < TL_RX_LIST_CNT; i++) { |
| 2147 | if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { |
| 2148 | m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); |
| 2149 | sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; |
| 2150 | } |
| 2151 | } |
| 2152 | bzero((char *)&sc->tl_ldata->tl_rx_list, |
| 2153 | sizeof(sc->tl_ldata->tl_rx_list)); |
| 2154 | |
| 2155 | /* |
| 2156 | * Free the TX list buffers. |
| 2157 | */ |
| 2158 | for (i = 0; i < TL_TX_LIST_CNT; i++) { |
| 2159 | if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { |
| 2160 | m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); |
| 2161 | sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; |
| 2162 | } |
| 2163 | } |
| 2164 | bzero((char *)&sc->tl_ldata->tl_tx_list, |
| 2165 | sizeof(sc->tl_ldata->tl_tx_list)); |
| 2166 | |
| 2167 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
| 2168 | |
| 2169 | return; |
| 2170 | } |
| 2171 | |
| 2172 | /* |
| 2173 | * Stop all chip I/O so that the kernel's probe routines don't |
| 2174 | * get confused by errant DMAs when rebooting. |
| 2175 | */ |
| 2176 | static void |
| 2177 | tl_shutdown(device_t dev) |
| 2178 | { |
| 2179 | struct tl_softc *sc; |
| 2180 | |
| 2181 | sc = device_get_softc(dev); |
| 2182 | |
| 2183 | tl_stop(sc); |
| 2184 | |
| 2185 | return; |
| 2186 | } |