/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * We preallocate a block of TX mbufs (roughly as many as
 * tx descriptors; the number is not critical) to speed up
 * operation during transmissions. The refcount on most of
 * these buffers is artificially bumped up so we can recycle
 * them more easily. Also, the destructor is intercepted
 * so we use it as an interrupt notification to wake up
 * processes blocked on a poll().
 *
 * For each receive ring we allocate one "struct mbq"
 * (an mbuf tailq plus a spinlock). We intercept packets
 * on the receive path and put them in the mbq, from which
 * netmap receive routines can grab them.
 *
 * In the generic_txsync() routine, netmap buffers are copied
 * (or linked, in the future) to the preallocated mbufs
 * and pushed to the transmit queue. Some of these mbufs
 * (those with NS_REPORT, or otherwise every half ring)
 * have their refcount set to 1, the others to 2.
 * When the destructor is invoked, we take that as
 * a notification that all mbufs up to that one in
 * the specific ring have been completed, and generate
 * the equivalent of a transmit interrupt.
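 *
 * Illustrative summary of the TX scheme above (numbers are only an
 * example): on a 256-slot ring, slots carrying NS_REPORT and roughly
 * one slot out of every 128 keep an mbuf with refcount 1 and
 * generic_mbuf_destructor() installed, so the final free acts as a
 * per-ring "transmit interrupt"; all other slots keep refcount 2, so
 * the driver's free only drops a reference and the mbuf is recycled
 * silently.
 */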
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/socket.h> /* sockaddrs */
#include <sys/event.h>

#include <net/if_var.h>
#include <sys/bus.h>    /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>
#define rtnl_lock()   D("rtnl_lock called");
#define rtnl_unlock() D("rtnl_unlock called");
#define MBUF_TXQ(m)   ((m)->m_pkthdr.hash)

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor, also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)      do {            \
        (m)->m_ext.ext_free = (void *)fn;               \
        /* (m)->m_ext.ext_type = EXT_EXTREF; */         \
} while (0)

#define GET_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)
/* ======================== usage stats =========================== */

struct rate_stats {
        unsigned long txsync;
        unsigned long rxsync;
};

struct rate_context {
        struct timer_list timer;
        struct rate_stats new;
        struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
        printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
        struct rate_context * ctx = (struct rate_context *)arg;
        struct rate_stats cur = ctx->new;

        r = mod_timer(&ctx->timer, jiffies +
                        msecs_to_jiffies(RATE_PERIOD * 1000));
        if (r)
                D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;
/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE        netmap_buf_size /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to decide
 * whether to deliver the irq.
 */
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
        if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
                return 0;

        return netmap_common_irq(ifp, q, work_done);
}
/* Enable/disable netmap mode for a generic network interface. */
int generic_netmap_register(struct netmap_adapter *na, int enable)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;

        error = ifp->netdev_ops->ndo_stop(ifp);
#endif /* REG_RESET */

        if (enable) { /* Enable netmap mode. */
                /* Initialize the rx queue, as generic_rx_handler() can
                 * be called as soon as netmap_catch_rx() returns.
                 */
                for (r=0; r<na->num_rx_rings; r++) {
                        mbq_safe_init(&na->rx_rings[r].rx_queue);
                        na->rx_rings[r].nr_ntc = 0;
                }

                /* Init the mitigation timer. */
                netmap_mitigation_init(gna);

                /*
                 * Preallocate packet buffers for the tx rings.
                 */
                for (r=0; r<na->num_tx_rings; r++) {
                        na->tx_rings[r].nr_ntc = 0;
                        na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
                                        M_DEVBUF, M_NOWAIT | M_ZERO);
                        if (!na->tx_rings[r].tx_pool) {
                                D("tx_pool allocation failed");
                        }
                        for (i=0; i<na->num_tx_desc; i++) {
                                m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                                if (!m) {
                                        D("tx_pool[%d] allocation failed", i);
                                }
                                na->tx_rings[r].tx_pool[i] = m;
                        }
                }

                /* Prepare to intercept incoming traffic. */
                error = netmap_catch_rx(na, 1);
                if (error) {
                        D("netdev_rx_handler_register() failed");
                        goto register_handler;
                }
                ifp->if_capenable |= IFCAP_NETMAP;

                /* Make netmap control the packet steering. */
                netmap_catch_packet_steering(gna, 1);

                if (rate_ctx.refcount == 0) {
                        memset(&rate_ctx, 0, sizeof(rate_ctx));
                        setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
                        if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
                                D("Error: mod_timer()");
                        }
                }
        } else { /* Disable netmap mode. */
                ifp->if_capenable &= ~IFCAP_NETMAP;

                /* Release packet steering control. */
                netmap_catch_packet_steering(gna, 0);

                /* Do not intercept packets on the rx path. */
                netmap_catch_rx(na, 0);

                /* Free the mbufs going to the netmap rings */
                for (r=0; r<na->num_rx_rings; r++) {
                        mbq_safe_purge(&na->rx_rings[r].rx_queue);
                        mbq_safe_destroy(&na->rx_rings[r].rx_queue);
                }

                netmap_mitigation_cleanup(gna);

                for (r=0; r<na->num_tx_rings; r++) {
                        for (i=0; i<na->num_tx_desc; i++) {
                                m_freem(na->tx_rings[r].tx_pool[i]);
                        }
                        kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
                }

                if (--rate_ctx.refcount == 0) {
                        del_timer(&rate_ctx.timer);
                }
        }

        error = ifp->netdev_ops->ndo_open(ifp);
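
        /* Error unwind path (reached via the goto above): free the tx_pool
         * mbufs and arrays that were allocated before the failure. */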
        i = na->num_tx_desc; /* Useless, but just to stay safe. */
        m_freem(na->tx_rings[r].tx_pool[i]);
        kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
        i = na->num_tx_desc - 1;

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
generic_mbuf_destructor(struct mbuf *m)
{
        D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
        netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
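        /* Restore the mbuf to its original state (default free routine and a
         * reference count of at least 1) so that the zone allocator can
         * recycle it as a regular packet mbuf. */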
        m->m_ext.ext_type = EXT_PACKET;
        m->m_ext.ext_free = NULL;
        if (*(m->m_ext.ref_cnt) == 0)
                *(m->m_ext.ref_cnt) = 1;
        uma_zfree(zone_pack, m);
        IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
generic_netmap_tx_clean(struct netmap_kring *kring)
{
        u_int num_slots = kring->nkr_num_slots;
        u_int ntc = kring->nr_ntc;
        u_int hwcur = kring->nr_hwcur;
        struct mbuf **tx_pool = kring->tx_pool;

        while (ntc != hwcur) { /* buffers not completed */
                struct mbuf *m = tx_pool[ntc];

                if (unlikely(m == NULL)) {
                        /* try to replenish the entry */
                        tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                        if (unlikely(m == NULL)) {
                                D("mbuf allocation failed, XXX error");
                                // XXX how do we proceed ? break ?
                        }
                } else if (GET_MBUF_REFCNT(m) != 1) {
                        break; /* This mbuf is still busy: its refcnt is 2. */
                }
                if (unlikely(++ntc == num_slots)) {
                        ntc = 0;
                }
        }
        kring->nr_hwavail += n;
        ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

        return n;
}

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
        u_int n = kring->nkr_num_slots;
        u_int ntc = kring->nr_ntc;
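
        /* Pick a slot roughly halfway between ntc and hwcur; e.g. (numbers
         * for illustration only) ntc = 10 and hwcur = 200 give e = 105.
         * The wrap-around branch below covers the case hwcur < ntc. */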
        if (hwcur >= ntc) {
                e = (hwcur + ntc) / 2;
        } else { /* wrap around */
                e = (hwcur + n + ntc) / 2;
        }

        if (unlikely(e >= n)) {
                D("This cannot happen");
        }

        return e;
}

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
        if (kring->nr_ntc == hwcur) {
                return;
        }
        e = generic_tx_event_middle(kring, hwcur);
        m = kring->tx_pool[e];
        if (m == NULL) {
                /* This can happen if there is already an event on the netmap
                   slot 'e': There is nothing to do. */
                return;
        }
        ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
        kring->tx_pool[e] = NULL;
        SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

        /* Decrement the refcount and free it if we have the last one. */
        m_freem(m);
}

/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_kring *kring = &na->tx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int j, k, num_slots = kring->nkr_num_slots;

        IFRATE(rate_ctx.new.txsync++);

        // TODO: handle the case of mbuf allocation failure
        /* first, reclaim completed buffers */
        generic_netmap_tx_clean(kring);

        /* Take a copy of ring->cur now, and never read it again. */
        k = ring->cur;
        if (unlikely(k >= num_slots)) {
                return netmap_ring_reinit(kring);
        }

        j = kring->nr_hwcur;
        /*
         * 'new_slots' counts how many new slots have been added:
         * everything from hwcur to cur, excluding reserved ones, if any.
         * nr_hwreserved starts from hwcur and counts how many slots were
         * not sent to the NIC from the previous round.
         */
        new_slots = k - j - kring->nr_hwreserved;
        if (new_slots < 0) {
                new_slots += num_slots;
        }
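        /* For example (illustrative numbers only): hwcur = 10, cur = 50 and
         * nr_hwreserved = 5 yield new_slots = 50 - 10 - 5 = 35 slots newly
         * handed to us by userspace in this round. */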

        /* Process new packets to send:
         * j is the current index in the netmap ring.
         */
        while (j != k) {
                struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
                void *addr = NMB(slot);
                u_int len = slot->len;

                if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
                        return netmap_ring_reinit(kring);
                }
                /* Take an mbuf from the tx pool and copy in the user packet. */
                m = kring->tx_pool[j];
                if (unlikely(m == NULL)) {
                        RD(5, "This should never happen");
                        kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                        if (unlikely(m == NULL)) {
                                D("mbuf allocation failed");
                                break;
                        }
                }
                /* XXX we should ask notifications when NS_REPORT is set,
                 * or roughly every half frame. We can optimize this
                 * by lazily requesting notifications only when a
                 * transmission fails. Probably the best way is to
                 * break on failures and set notifications when
                 * ring->avail == 0 || j != k
                 */
                tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
                if (unlikely(tx_ret)) {
                        RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
                                        tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
                        /*
                         * No room for this mbuf in the device driver.
                         * Request a notification FOR A PREVIOUS MBUF,
                         * then call generic_netmap_tx_clean(kring) to do the
                         * double check and see if we can free more buffers.
                         * If there is space continue, else break;
                         * NOTE: the double check is necessary if the problem
                         * occurs in the txsync call after selrecord().
                         * Also, we need some way to tell the caller that not
                         * all buffers were queued onto the device (this was
                         * not a problem with native netmap driver where space
                         * is preallocated). The bridge has a similar problem
                         * and we solve it there by dropping the excess packets.
                         */
                        generic_set_tx_event(kring, j);
                        if (generic_netmap_tx_clean(kring)) { /* space now available */
                                continue;
                        } else {
                                break;
                        }
                }
                slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                if (unlikely(++j == num_slots))
                        j = 0;
        }

        /* Update hwcur to the next slot to transmit. */
        kring->nr_hwcur = j;

        /*
         * Report all new slots as unavailable, even those not sent.
         * We account for them with hwreserved, so that
         * nr_hwreserved =:= cur - nr_hwcur
         */
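        /* Illustrative example: if userspace asked us to send up to cur = 50
         * but the driver queue filled at j = 45, the 5 unsent slots stay in
         * nr_hwreserved and are retried on the next txsync (numbers are only
         * an example). */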
        kring->nr_hwavail -= new_slots;
        kring->nr_hwreserved = k - j;
        if (kring->nr_hwreserved < 0) {
                kring->nr_hwreserved += num_slots;
        }

        IFRATE(rate_ctx.new.txpkt += ntx);

        if (!kring->nr_hwavail) {
                /* No more available slots? Set a notification event
                 * on a netmap slot that will be cleaned in the future.
                 * No doublecheck is performed, since txsync() will be
                 * called twice by netmap_poll().
                 */
                generic_set_tx_event(kring, j);
        }
        ND("tx #%d, hwavail = %d", n, kring->nr_hwavail);

        /* Synchronize the user's view to the kernel view. */
        ring->avail = kring->nr_hwavail;
        ring->reserved = kring->nr_hwreserved;

        return 0;
}

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
        struct netmap_adapter *na = NA(ifp);
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        u_int rr = 0; // receive ring number

        /* limit the size of the queue */
        if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
                m_freem(m);
        } else {
                mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
        }

        if (netmap_generic_mit < 32768) {
                /* no rx mitigation, pass notification up */
                netmap_generic_irq(na->ifp, rr, &work_done);
                IFRATE(rate_ctx.new.rxirq++);
        } else {
                /* same as send combining, filter notification if there is a
                 * pending timer, otherwise pass it up and start a timer.
                 */
                if (likely(netmap_mitigation_active(gna))) {
                        /* Record that there is some pending work. */
                        gna->mit_pending = 1;
                } else {
                        netmap_generic_irq(na->ifp, rr, &work_done);
                        IFRATE(rate_ctx.new.rxirq++);
                        netmap_mitigation_start(gna);
                }
        }
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap ring.
 * Access must be protected because the rx handler is asynchronous.
 */
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct netmap_kring *kring = &na->rx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int j, n, lim = kring->nkr_num_slots - 1;
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
        u_int k, resvd = ring->reserved;

        if (ring->cur > lim)
                return netmap_ring_reinit(kring);

        /* Import newly received packets into the netmap ring. */
        if (netmap_no_pendintr || force_update) {
                uint16_t slot_flags = kring->nkr_slot_flags;

                j = kring->nr_ntc; /* first empty slot in the receive ring */
                /* extract buffers from the rx queue, stop at most one
                 * slot before nr_hwcur (index k)
                 */
                k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim;
                while (j != k) {
                        void *addr = NMB(&ring->slot[j]);

                        if (addr == netmap_buffer_base) { /* Bad buffer */
                                return netmap_ring_reinit(kring);
                        }
                        /*
                         * Call the locked version of the function.
                         * XXX Ideally we could grab a batch of mbufs at once,
                         * by changing rx_queue into a ring.
                         */
                        m = mbq_safe_dequeue(&kring->rx_queue);
                        if (!m)
                                break;
                        m_copydata(m, 0, len, addr);
                        ring->slot[j].len = len;
                        ring->slot[j].flags = slot_flags;
                        if (unlikely(j++ == lim))
                                j = 0;
                }
                kring->nr_hwavail += n;
                IFRATE(rate_ctx.new.rxpkt += n);
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        // XXX should we invert the order ?
        /* Skip past packets that userspace has released */
        j = kring->nr_hwcur;
        k = ring->cur;
        if (resvd + ring->avail >= lim + 1) {
                D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
                ring->reserved = resvd = 0; // XXX panic...
        }
        k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
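        /* e.g. (illustrative numbers) with lim = 255, cur = 3 and resvd = 10,
         * the wrap-around branch gives k = 3 + 256 - 10 = 249. */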

        /* Userspace has released some packets. */
        for (n = 0; j != k; n++) {
                struct netmap_slot *slot = &ring->slot[j];

                slot->flags &= ~NS_BUF_CHANGED;
                if (unlikely(j++ == lim))
                        j = 0;
        }
        kring->nr_hwavail -= n;

        /* Tell userspace that there are new packets. */
        ring->avail = kring->nr_hwavail - resvd;
        IFRATE(rate_ctx.new.rxsync++);

        return 0;
}

generic_netmap_dtor(struct netmap_adapter *na)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
        struct netmap_adapter *prev_na = gna->prev;

        if (prev_na != NULL) {
                D("Released generic NA %p", gna);
                netmap_adapter_put(prev_na);
        }
        D("Restored native NA %p", prev_na);
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
generic_netmap_attach(struct ifnet *ifp)
{
        struct netmap_adapter *na;
        struct netmap_generic_adapter *gna;
        u_int num_tx_desc, num_rx_desc;

        num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

        generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
        ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

        gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (gna == NULL) {
                D("no memory on attach, give up");
                return ENOMEM;
        }
        na = (struct netmap_adapter *)gna;
        na->num_tx_desc = num_tx_desc;
        na->num_rx_desc = num_rx_desc;
        na->nm_register = &generic_netmap_register;
        na->nm_txsync = &generic_netmap_txsync;
        na->nm_rxsync = &generic_netmap_rxsync;
        na->nm_dtor = &generic_netmap_dtor;
        /* when using generic, IFCAP_NETMAP is set so we force
         * NAF_SKIP_INTR to use the regular interrupt handler
         */
        na->na_flags = NAF_SKIP_INTR;

        ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
                        ifp->num_tx_queues, ifp->real_num_tx_queues,
                        ifp->tx_queue_len);
        ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
                        ifp->num_rx_queues, ifp->real_num_rx_queues);

        generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

        retval = netmap_attach_common(na);
        if (retval) {
                kfree(gna, M_DEVBUF);
        }

        return retval;
}
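
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the core attach path can fall back to this module when a driver lacks
 * native netmap support, e.g. along the lines of
 *
 *      if (!NETMAP_CAPABLE(ifp))
 *              error = generic_netmap_attach(ifp);
 *
 * The NETMAP_CAPABLE() check is only an assumption about how the caller
 * detects native support.
 */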