if: Move IFF_OACTIVE bit into ifaltq; prepare multiple TX queues support
diff --git a/sys/dev/netif/fxp/if_fxp.c b/sys/dev/netif/fxp/if_fxp.c
index 0a34c33..4f45277 100644
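Two driver-visible API migrations run through this diff. The IFF_OACTIVE interface flag is superseded by state kept in the send queue itself, set and cleared with ifq_set_oactive()/ifq_clr_oactive() and tested with ifq_is_oactive() on &ifp->if_snd; moving the bit into the ifaltq is the groundwork for per-queue state once multiple TX queues exist. At the same time the driver's DEVICE_POLLING hooks (if_poll with the POLL_REGISTER, POLL_DEREGISTER and POLL_AND_CHECK_STATUS commands) are converted to the IFPOLL_ENABLE framework (if_npoll plus a compat poll handler), and the per-CPU mbuf_chain batching on the RX path gives way to calling ifp->if_input() directly. The sketch below shows only the oactive pattern, for a hypothetical "xx" driver; xx_tx_ring_full() and xx_txeof() are illustrative placeholders, not fxp code.

static void
xx_start(struct ifnet *ifp)
{
	struct xx_softc *sc = ifp->if_softc;

	/* old test: (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING */
	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	/* ... dequeue packets from &ifp->if_snd and post them to the TX ring ... */

	if (xx_tx_ring_full(sc)) {
		/* old: ifp->if_flags |= IFF_OACTIVE; */
		ifq_set_oactive(&ifp->if_snd);
	}
}

static void
xx_txeof(struct xx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* ... reclaim completed TX descriptors ... */

	if (!xx_tx_ring_full(sc)) {
		/* old: ifp->if_flags &= ~IFF_OACTIVE; */
		ifq_clr_oactive(&ifp->if_snd);
	}
}
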
@@ -32,7 +32,7 @@
  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
  */
 
-#include "opt_polling.h"
+#include "opt_ifpoll.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -56,6 +56,7 @@
 
 #include <net/ethernet.h>
 #include <net/if_arp.h>
+#include <net/if_poll.h>
 
 #include <vm/vm.h>             /* for vtophys */
 #include <vm/pmap.h>           /* for vtophys */
@@ -234,8 +235,9 @@ static void         fxp_miibus_writereg(device_t dev, int phy, int reg,
 static void            fxp_load_ucode(struct fxp_softc *sc);
 static int             sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
 static int             sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
-#ifdef DEVICE_POLLING
-static poll_handler_t fxp_poll;
+#ifdef IFPOLL_ENABLE
+static void            fxp_npoll(struct ifnet *, struct ifpoll_info *);
+static void            fxp_npoll_compat(struct ifnet *, void *, int);
 #endif
 
 static void            fxp_lwcopy(volatile u_int32_t *src,
@@ -649,8 +651,8 @@ fxp_attach(device_t dev)
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = fxp_ioctl;
        ifp->if_start = fxp_start;
-#ifdef DEVICE_POLLING
-       ifp->if_poll = fxp_poll;
+#ifdef IFPOLL_ENABLE
+       ifp->if_npoll = fxp_npoll;
 #endif
        ifp->if_watchdog = fxp_watchdog;
 
@@ -659,6 +661,12 @@ fxp_attach(device_t dev)
         */
        ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL);
 
+#ifdef IFPOLL_ENABLE
+       ifpoll_compat_setup(&sc->fxp_npoll,
+           &sc->sysctl_ctx, sc->sysctl_tree, device_get_unit(dev),
+           ifp->if_serializer);
+#endif
+
        /*
         * Tell the upper layer(s) we support long frames.
         */
@@ -1056,7 +1064,7 @@ fxp_start(struct ifnet *ifp)
                return;
        }
 
-       if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+       if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
                return;
 
        txp = NULL;
@@ -1177,7 +1185,7 @@ tbdinit:
        }
 
        if (sc->tx_queued >= FXP_USABLE_TXCB)
-               ifp->if_flags |= IFF_OACTIVE;
+               ifq_set_oactive(&ifp->if_snd);
 
        /*
         * We're finished. If we added to the list, issue a RESUME to get DMA
@@ -1189,46 +1197,66 @@ tbdinit:
        }
 }
 
-#ifdef DEVICE_POLLING
+#ifdef IFPOLL_ENABLE
 
 static void
-fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+fxp_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
 {
        struct fxp_softc *sc = ifp->if_softc;
        u_int8_t statack;
 
        ASSERT_SERIALIZED(ifp->if_serializer);
 
-       switch(cmd) {
-       case POLL_REGISTER:
-               /* disable interrupts */
-               CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
-               break;
-       case POLL_DEREGISTER:
-               /* enable interrupts */
-               CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
-               break;
-       default:
-               statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
-                         FXP_SCB_STATACK_FR;
-               if (cmd == POLL_AND_CHECK_STATUS) {
-                       u_int8_t tmp;
-
-                       tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
-                       if (tmp == 0xff || tmp == 0)
-                               return; /* nothing to do */
-                       tmp &= ~statack;
-                       /* ack what we can */
-                       if (tmp != 0)
-                               CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
-                       statack |= tmp;
+       statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
+                 FXP_SCB_STATACK_FR;
+       if (sc->fxp_npoll.ifpc_stcount-- == 0) {
+               u_int8_t tmp;
+
+               sc->fxp_npoll.ifpc_stcount = sc->fxp_npoll.ifpc_stfrac;
+
+               tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
+               if (tmp == 0xff || tmp == 0)
+                       return; /* nothing to do */
+               tmp &= ~statack;
+               /* ack what we can */
+               if (tmp != 0)
+                       CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
+               statack |= tmp;
+       }
+       fxp_intr_body(sc, statack, count);
+}
+
+static void
+fxp_npoll(struct ifnet *ifp, struct ifpoll_info *info)
+{
+       struct fxp_softc *sc = ifp->if_softc;
+
+       ASSERT_SERIALIZED(ifp->if_serializer);
+
+       if (info != NULL) {
+               int cpuid = sc->fxp_npoll.ifpc_cpuid;
+
+               info->ifpi_rx[cpuid].poll_func = fxp_npoll_compat;
+               info->ifpi_rx[cpuid].arg = NULL;
+               info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
+
+               if (ifp->if_flags & IFF_RUNNING) {
+                       /* disable interrupts */
+                       CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
+                           FXP_SCB_INTR_DISABLE);
+                       sc->fxp_npoll.ifpc_stcount = 0;
                }
-               fxp_intr_body(sc, statack, count);
-               break;
+               ifp->if_npoll_cpuid = cpuid;
+       } else {
+               if (ifp->if_flags & IFF_RUNNING) {
+                       /* enable interrupts */
+                       CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
+               }
+               ifp->if_npoll_cpuid = -1;
        }
 }
 
-#endif /* DEVICE_POLLING */
+#endif /* IFPOLL_ENABLE */
 
 /*
  * Process interface interrupts.
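Under IFPOLL_ENABLE the register/deregister commands of the old if_poll handler become the two branches of if_npoll: a non-NULL ifpoll_info registers the per-CPU poll handler and masks the chip's interrupts, a NULL argument deregisters it and unmasks them, and ifp->if_npoll_cpuid records which CPU is polling. The occasional status read/ack that used to hang off POLL_AND_CHECK_STATUS is now throttled by the ifpc_stcount/ifpc_stfrac counters set up by ifpoll_compat_setup(): the check runs only on every (stfrac + 1)-th poll. A minimal sketch of such a compat handler for a hypothetical "xx" driver follows; xx_check_and_ack_status() and xx_rxtx() are assumed helpers, not part of the fxp code.

static void
xx_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct xx_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Do the (slower) status register read/ack only every stfrac+1 polls. */
	if (sc->xx_npoll.ifpc_stcount-- == 0) {
		sc->xx_npoll.ifpc_stcount = sc->xx_npoll.ifpc_stfrac;
		xx_check_and_ack_status(sc);
	}

	/* Process at most 'count' frames, just like the interrupt path. */
	xx_rxtx(sc, count);
}
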
@@ -1270,11 +1298,10 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
        struct mbuf *m;
        struct fxp_rfa *rfa;
        int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
-       struct mbuf_chain chain[MAXCPU];
 
        if (rnr)
                fxp_rnr++;
-#ifdef DEVICE_POLLING
+#ifdef IFPOLL_ENABLE
        /* Pick up a deferred RNR condition if `count' ran out last time. */
        if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
                sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
@@ -1312,7 +1339,7 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
                sc->cbl_first = txp;
 
                if (sc->tx_queued < FXP_USABLE_TXCB)
-                       ifp->if_flags &= ~IFF_OACTIVE;
+                       ifq_clr_oactive(&ifp->if_snd);
 
                if (sc->tx_queued == 0) {
                        ifp->if_timer = 0;
@@ -1333,8 +1360,6 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
        if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
                return;
 
-       ether_input_chain_init(chain);
-
        /*
         * Process receiver interrupts. If a no-resource (RNR)
         * condition exists, get whatever packets we can and
@@ -1352,7 +1377,7 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
                rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
                                         RFA_ALIGNMENT_FUDGE);
 
-#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
+#ifdef IFPOLL_ENABLE /* loop at most count times if count >=0 */
                if (count >= 0 && count-- == 0) {
                        if (rnr) {
                                /* Defer RNR processing until the next time. */
@@ -1361,7 +1386,7 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
                        }
                        break;
                }
-#endif /* DEVICE_POLLING */
+#endif /* IFPOLL_ENABLE */
 
                if ( (rfa->rfa_status & FXP_RFA_STATUS_C) == 0)
                        break;
@@ -1397,12 +1422,10 @@ fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
                                continue;
                        }
                        m->m_pkthdr.len = m->m_len = total_len;
-                       ether_input_chain(ifp, m, NULL, chain);
+                       ifp->if_input(ifp, m);
                }
        }
 
-       ether_input_dispatch(chain);
-
        if (rnr) {
                fxp_scb_wait(sc);
                CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
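The RX hand-off is simplified along the way: instead of collecting frames into a per-CPU mbuf_chain and flushing it with ether_input_dispatch() at the end of the interrupt body, each completed frame goes straight to ifp->if_input(). Roughly (an assumed shape, not the full fxp RX loop; xx_next_completed_frame() is a hypothetical helper):

	for (;;) {
#ifdef IFPOLL_ENABLE
		/* In polling mode, stop once the burst budget is used up. */
		if (count >= 0 && count-- == 0)
			break;
#endif
		m = xx_next_completed_frame(sc);
		if (m == NULL)
			break;
		ifp->if_input(ifp, m);	/* was: ether_input_chain(ifp, m, NULL, chain) */
	}
	/* no trailing ether_input_dispatch(chain) pass is needed any more */
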
@@ -1481,7 +1504,7 @@ fxp_tick(void *xsc)
        sc->cbl_first = txp;
 
        if (sc->tx_queued < FXP_USABLE_TXCB)
-               ifp->if_flags &= ~IFF_OACTIVE;
+               ifq_clr_oactive(&ifp->if_snd);
        if (sc->tx_queued == 0)
                ifp->if_timer = 0;
 
@@ -1553,7 +1576,8 @@ fxp_stop(struct fxp_softc *sc)
 
        ASSERT_SERIALIZED(ifp->if_serializer);
 
-       ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+       ifp->if_flags &= ~IFF_RUNNING;
+       ifq_clr_oactive(&ifp->if_snd);
        ifp->if_timer = 0;
 
        /*
@@ -1848,20 +1872,21 @@ fxp_init(void *xsc)
                mii_mediachg(device_get_softc(sc->miibus));
 
        ifp->if_flags |= IFF_RUNNING;
-       ifp->if_flags &= ~IFF_OACTIVE;
+       ifq_clr_oactive(&ifp->if_snd);
 
        /*
         * Enable interrupts.
         */
-#ifdef DEVICE_POLLING
+#ifdef IFPOLL_ENABLE
        /*
         * ... but only do that if we are not polling. And because (presumably)
         * the default is interrupts on, we need to disable them explicitly!
         */
-       if ( ifp->if_flags & IFF_POLLING )
+       if (ifp->if_flags & IFF_NPOLLING) {
                CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
-       else
-#endif /* DEVICE_POLLING */
+               sc->fxp_npoll.ifpc_stcount = 0;
+       } else
+#endif /* IFPOLL_ENABLE */
        CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
 
        /*
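Finally, fxp_init() has to respect the new polling state: IFF_POLLING becomes IFF_NPOLLING, and while that flag is set the (re)initialization path keeps the chip's interrupts masked and resets ifpc_stcount so the next poll performs a status check. The same shape for a hypothetical "xx" driver, with xx_disable_intr()/xx_enable_intr() standing in for the CSR writes fxp does:

#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING) {
		xx_disable_intr(sc);		/* stay in polling mode */
		sc->xx_npoll.ifpc_stcount = 0;	/* force a status check on the next poll */
	} else
#endif
	xx_enable_intr(sc);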