netmap: change header includes
[dragonfly.git] / sys / net / netmap / netmap_generic.c
/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *    We preallocate a block of TX mbufs (roughly as many as
 *    tx descriptors; the number is not critical) to speed up
 *    operation during transmissions. The refcount on most of
 *    these buffers is artificially bumped up so we can recycle
 *    them more easily. Also, the destructor is intercepted
 *    so we use it as an interrupt notification to wake up
 *    processes blocked on a poll().
 *
 *    For each receive ring we allocate one "struct mbq"
 *    (an mbuf tailq plus a spinlock). We intercept packets
 *    (through if_input) on the receive path and put them in
 *    the mbq from which netmap receive routines can grab them.
 *
 * TX:
 *    In the generic_netmap_txsync() routine, netmap buffers are copied
 *    (or linked, in the future) to the preallocated mbufs
 *    and pushed to the transmit queue. Some of these mbufs
 *    (those with NS_REPORT, or otherwise every half ring)
 *    have the refcount=1, others have refcount=2.
 *    When the destructor is invoked, we take that as
 *    a notification that all mbufs up to that one in
 *    the specific ring have been completed, and generate
 *    the equivalent of a transmit interrupt.
 *
 * RX:
 *    Intercepted mbufs are queued on the per-ring mbq by the
 *    rx handler; generic_netmap_rxsync() then copies their
 *    payload into the netmap slots and frees them.
 */
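
/*
 * Summary of the TX notification flow implemented below (a restatement
 * of the code, not additional behavior): generic_netmap_txsync() hands
 * mbufs from the per-ring tx_pool to the driver; generic_set_tx_event()
 * occasionally installs generic_mbuf_destructor() on one of them, so that
 * when the driver eventually frees that mbuf the destructor runs, calls
 * netmap_generic_irq() and wakes up processes blocked on poll().
 */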

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/socket.h> /* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>    /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>

#define rtnl_lock()     D("rtnl_lock called");
#define rtnl_unlock()   D("rtnl_unlock called");
#define MBUF_TXQ(m)     ((m)->m_pkthdr.hash)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len)    m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor, also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)      do {            \
        (m)->m_ext.ext_free = (void *)fn;               \
        /* (m)->m_ext.ext_type = EXT_EXTREF; */         \
} while (0)

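/*
 * GET_MBUF_REFCNT() returns -1 when the external refcount pointer is
 * not set. Its only users below are a debug trace in
 * generic_set_tx_event() and the busy-mbuf check in
 * generic_netmap_tx_clean(), which is currently under #if 0.
 */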
#define GET_MBUF_REFCNT(m)      ((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)

/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
        unsigned long txpkt;
        unsigned long txsync;
        unsigned long txirq;
        unsigned long rxpkt;
        unsigned long rxirq;
        unsigned long rxsync;
};

struct rate_context {
        unsigned refcount;
        struct timer_list timer;
        struct rate_stats new;
        struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
        printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
        struct rate_context *ctx = (struct rate_context *)arg;
        struct rate_stats cur = ctx->new;
        int r;

        RATE_PRINTK(txpkt);
        RATE_PRINTK(txsync);
        RATE_PRINTK(txirq);
        RATE_PRINTK(rxpkt);
        RATE_PRINTK(rxsync);
        RATE_PRINTK(rxirq);
        printk("\n");

        ctx->old = cur;
        r = mod_timer(&ctx->timer, jiffies +
                        msecs_to_jiffies(RATE_PERIOD * 1000));
        if (unlikely(r))
                D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE        netmap_buf_size /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
        if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
                return 0;

        return netmap_common_irq(ifp, q, work_done);
}

/* Enable/disable netmap mode for a generic network interface. */
int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        struct ifnet *ifp;
        struct mbuf *m;
        int error;
        int i, r;

        if (!na)
                return EINVAL;
        ifp = na->ifp;

#ifdef REG_RESET
        error = ifp->netdev_ops->ndo_stop(ifp);
        if (error) {
                return error;
        }
#endif /* REG_RESET */

        if (enable) { /* Enable netmap mode. */
                /* Initialize the rx queue, as generic_rx_handler() can
                 * be called as soon as netmap_catch_rx() returns.
                 */
                for (r = 0; r < na->num_rx_rings; r++) {
                        mbq_safe_init(&na->rx_rings[r].rx_queue);
                        na->rx_rings[r].nr_ntc = 0;
                }

                /* Init the mitigation timer. */
                netmap_mitigation_init(gna);

                /*
                 * Preallocate packet buffers for the tx rings.
                 */
                for (r = 0; r < na->num_tx_rings; r++) {
                        na->tx_rings[r].nr_ntc = 0;
                        na->tx_rings[r].tx_pool =
                                kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
                                        M_DEVBUF, M_NOWAIT | M_ZERO);
                        if (!na->tx_rings[r].tx_pool) {
                                D("tx_pool allocation failed");
                                error = ENOMEM;
                                goto free_tx_pool;
                        }
                        for (i = 0; i < na->num_tx_desc; i++) {
                                m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                                if (!m) {
                                        D("tx_pool[%d] allocation failed", i);
                                        error = ENOMEM;
                                        goto free_mbufs;
                                }
                                na->tx_rings[r].tx_pool[i] = m;
                        }
                }
                rtnl_lock();
                /* Prepare to intercept incoming traffic. */
                error = netmap_catch_rx(na, 1);
                if (error) {
                        D("netdev_rx_handler_register() failed");
                        goto register_handler;
                }
                ifp->if_capenable |= IFCAP_NETMAP;

                /* Make netmap control the packet steering. */
                netmap_catch_packet_steering(gna, 1);

                rtnl_unlock();

#ifdef RATE
                if (rate_ctx.refcount == 0) {
                        D("setup_timer()");
                        memset(&rate_ctx, 0, sizeof(rate_ctx));
                        setup_timer(&rate_ctx.timer, &rate_callback,
                                    (unsigned long)&rate_ctx);
                        if (mod_timer(&rate_ctx.timer,
                                      jiffies + msecs_to_jiffies(1500))) {
                                D("Error: mod_timer()");
                        }
                }
                rate_ctx.refcount++;
#endif /* RATE */

        } else { /* Disable netmap mode. */
                rtnl_lock();

                ifp->if_capenable &= ~IFCAP_NETMAP;

                /* Release packet steering control. */
                netmap_catch_packet_steering(gna, 0);

                /* Do not intercept packets on the rx path. */
                netmap_catch_rx(na, 0);

                rtnl_unlock();

                /* Free the mbufs going to the netmap rings */
                for (r = 0; r < na->num_rx_rings; r++) {
                        mbq_safe_purge(&na->rx_rings[r].rx_queue);
                        mbq_safe_destroy(&na->rx_rings[r].rx_queue);
                }

                netmap_mitigation_cleanup(gna);

                for (r = 0; r < na->num_tx_rings; r++) {
                        for (i = 0; i < na->num_tx_desc; i++) {
                                m_freem(na->tx_rings[r].tx_pool[i]);
                        }
                        kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
                }

#ifdef RATE
                if (--rate_ctx.refcount == 0) {
                        D("del_timer()");
                        del_timer(&rate_ctx.timer);
                }
#endif
        }

#ifdef REG_RESET
        error = ifp->netdev_ops->ndo_open(ifp);
        if (error) {
                goto alloc_tx_pool;
        }
#endif

        return 0;

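        /*
         * Error unwind: register_handler drops the lock taken above;
         * free_tx_pool/free_mbufs then walk rings r..0 backwards,
         * freeing any mbufs already allocated and each ring's tx_pool
         * array (a description of the labels below).
         */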
register_handler:
        rtnl_unlock();
free_tx_pool:
        r--;
        i = na->num_tx_desc; /* Useless, but just to stay safe. */
free_mbufs:
        i--;
        for (; r >= 0; r--) {
                for (; i >= 0; i--) {
                        m_freem(na->tx_rings[r].tx_pool[i]);
                }
                kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
                i = na->num_tx_desc - 1;
        }

        return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
        if (netmap_verbose)
                D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
        netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
        m->m_ext.ext_type = EXT_PACKET;
#endif
        m->m_ext.ext_free = NULL;
#if 0
        if (*(m->m_ext.ref_cnt) == 0)
                *(m->m_ext.ref_cnt) = 1;
        uma_zfree(zone_pack, m);
#endif
        IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
        u_int num_slots = kring->nkr_num_slots;
        u_int ntc = kring->nr_ntc;
        u_int hwcur = kring->nr_hwcur;
        u_int n = 0;
        struct mbuf **tx_pool = kring->tx_pool;

        while (ntc != hwcur) { /* buffers not completed */
                struct mbuf *m = tx_pool[ntc];

                if (unlikely(m == NULL)) {
                        /* try to replenish the entry */
                        tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                        if (unlikely(m == NULL)) {
                                D("mbuf allocation failed, XXX error");
                                // XXX how do we proceed ? break ?
                                return -ENOMEM;
                        }
#if 0
                } else if (GET_MBUF_REFCNT(m) != 1) {
                        break; /* This mbuf is still busy: its refcnt is 2. */
#endif
                }
                if (unlikely(++ntc == num_slots)) {
                        ntc = 0;
                }
                n++;
        }
        kring->nr_ntc = ntc;
        kring->nr_hwavail += n;
        ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

        return n;
}

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
        u_int n = kring->nkr_num_slots;
        u_int ntc = kring->nr_ntc;
        u_int e;

        if (hwcur >= ntc) {
                e = (hwcur + ntc) / 2;
        } else { /* wrap around */
                e = (hwcur + n + ntc) / 2;
                if (e >= n) {
                        e -= n;
                }
        }

        if (unlikely(e >= n)) {
                D("This cannot happen");
                e = 0;
        }

        return e;
}
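
/*
 * Worked example (illustrative numbers only): with nkr_num_slots = 256,
 * nr_ntc = 200 and hwcur = 10 the pending region wraps around, so
 * e = (10 + 256 + 200) / 2 = 233, i.e. roughly halfway between nr_ntc
 * and hwcur along the ring.
 */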

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
        struct mbuf *m;
        u_int e;

        if (kring->nr_ntc == hwcur) {
                return;
        }
        e = generic_tx_event_middle(kring, hwcur);

        m = kring->tx_pool[e];
        if (m == NULL) {
                /* This can happen if there is already an event on the netmap
                   slot 'e': There is nothing to do. */
                return;
        }
        ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
        kring->tx_pool[e] = NULL;
        SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

        // XXX wmb() ?
        /* Decrement the refcount and free it if we have the last one. */
        m_freem(m);
        smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_kring *kring = &na->tx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int j, k, num_slots = kring->nkr_num_slots;
        int new_slots, ntx;

        IFRATE(rate_ctx.new.txsync++);

        // TODO: handle the case of mbuf allocation failure
        /* first, reclaim completed buffers */
        generic_netmap_tx_clean(kring);

        /* Take a copy of ring->cur now, and never read it again. */
        k = ring->cur;
        if (unlikely(k >= num_slots)) {
                return netmap_ring_reinit(kring);
        }

        rmb();
        j = kring->nr_hwcur;
        /*
         * 'new_slots' counts how many new slots have been added:
         * everything from hwcur to cur, excluding reserved ones, if any.
         * nr_hwreserved starts from hwcur and counts how many slots were
         * not sent to the NIC from the previous round.
         */
        new_slots = k - j - kring->nr_hwreserved;
        if (new_slots < 0) {
                new_slots += num_slots;
        }
        ntx = 0;
        if (j != k) {
                /* Process new packets to send:
                 * j is the current index in the netmap ring.
                 */
                while (j != k) {
                        struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
                        void *addr = NMB(slot);
                        u_int len = slot->len;
                        struct mbuf *m;
                        int tx_ret;

                        if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
                                return netmap_ring_reinit(kring);
                        }
                        /* Take an mbuf from the tx pool and copy in the user packet. */
                        m = kring->tx_pool[j];
                        if (unlikely(!m)) {
                                RD(5, "This should never happen");
                                kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                                if (unlikely(m == NULL)) {
                                        D("mbuf allocation failed");
                                        break;
                                }
                        }
                        /* XXX we should ask for notifications when NS_REPORT is set,
                         * or roughly every half frame. We can optimize this
                         * by lazily requesting notifications only when a
                         * transmission fails. Probably the best way is to
                         * break on failures and set notifications when
                         * ring->avail == 0 || j != k
                         */
                        tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
                        if (unlikely(tx_ret)) {
                                RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
                                   tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
                                /*
                                 * No room for this mbuf in the device driver.
                                 * Request a notification FOR A PREVIOUS MBUF,
                                 * then call generic_netmap_tx_clean(kring) to do the
                                 * double check and see if we can free more buffers.
                                 * If there is space continue, else break;
                                 * NOTE: the double check is necessary if the problem
                                 * occurs in the txsync call after selrecord().
                                 * Also, we need some way to tell the caller that not
                                 * all buffers were queued onto the device (this was
                                 * not a problem with the native netmap driver where
                                 * space is preallocated). The bridge has a similar
                                 * problem and we solve it there by dropping the
                                 * excess packets.
                                 */
                                generic_set_tx_event(kring, j);
                                if (generic_netmap_tx_clean(kring)) { /* space now available */
                                        continue;
                                } else {
                                        break;
                                }
                        }
                        slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
                        if (unlikely(++j == num_slots))
                                j = 0;
                        ntx++;
                }

                /* Update hwcur to the next slot to transmit. */
                kring->nr_hwcur = j;

                /*
                 * Report all new slots as unavailable, even those not sent.
                 * We account for them with nr_hwreserved, so that
                 * nr_hwreserved == cur - nr_hwcur
                 */
                kring->nr_hwavail -= new_slots;
                kring->nr_hwreserved = k - j;
                if (kring->nr_hwreserved < 0) {
                        kring->nr_hwreserved += num_slots;
                }

                IFRATE(rate_ctx.new.txpkt += ntx);

                if (!kring->nr_hwavail) {
                        /* No more available slots? Set a notification event
                         * on a netmap slot that will be cleaned in the future.
                         * No doublecheck is performed, since txsync() will be
                         * called twice by netmap_poll().
                         */
                        generic_set_tx_event(kring, j);
                }
                ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
        }

        /* Synchronize the user's view to the kernel view. */
        ring->avail = kring->nr_hwavail;
        ring->reserved = kring->nr_hwreserved;

        return 0;
}

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
        struct netmap_adapter *na = NA(ifp);
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        u_int work_done;
        u_int rr = 0; // receive ring number

        ND("called");
        /* limit the size of the queue */
        if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
                m_freem(m);
        } else {
                mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
        }

        if (netmap_generic_mit < 32768) {
                /* no rx mitigation, pass notification up */
                netmap_generic_irq(na->ifp, rr, &work_done);
                IFRATE(rate_ctx.new.rxirq++);
        } else {
                /* same as send combining: filter the notification if there is
                 * a pending timer, otherwise pass it up and start a timer.
                 */
                if (likely(netmap_mitigation_active(gna))) {
                        /* Record that there is some pending work. */
                        gna->mit_pending = 1;
                } else {
                        netmap_generic_irq(na->ifp, rr, &work_done);
                        IFRATE(rate_ctx.new.rxirq++);
                        netmap_mitigation_start(gna);
                }
        }
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
        struct netmap_kring *kring = &na->rx_rings[ring_nr];
        struct netmap_ring *ring = kring->ring;
        u_int j, n, lim = kring->nkr_num_slots - 1;
        int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
        u_int k, resvd = ring->reserved;

        if (ring->cur > lim)
                return netmap_ring_reinit(kring);

        /* Import newly received packets into the netmap ring. */
        if (netmap_no_pendintr || force_update) {
                uint16_t slot_flags = kring->nkr_slot_flags;
                struct mbuf *m;

                n = 0;
                j = kring->nr_ntc; /* first empty slot in the receive ring */
                /* extract buffers from the rx queue, stop at most one
                 * slot before nr_hwcur (index k)
                 */
                k = (kring->nr_hwcur) ? kring->nr_hwcur - 1 : lim;
                while (j != k) {
                        int len;
                        void *addr = NMB(&ring->slot[j]);

                        if (addr == netmap_buffer_base) { /* Bad buffer */
                                return netmap_ring_reinit(kring);
                        }
                        /*
                         * Call the locked version of the function.
                         * XXX Ideally we could grab a batch of mbufs at once,
                         * by changing rx_queue into a ring.
                         */
                        m = mbq_safe_dequeue(&kring->rx_queue);
                        if (!m)
                                break;
                        len = MBUF_LEN(m);
                        m_copydata(m, 0, len, addr);
                        ring->slot[j].len = len;
                        ring->slot[j].flags = slot_flags;
                        m_freem(m);
                        if (unlikely(j++ == lim))
                                j = 0;
                        n++;
                }
                if (n) {
                        kring->nr_ntc = j;
                        kring->nr_hwavail += n;
                        IFRATE(rate_ctx.new.rxpkt += n);
                }
                kring->nr_kflags &= ~NKR_PENDINTR;
        }

        // XXX should we invert the order ?
        /* Skip past packets that userspace has released */
        j = kring->nr_hwcur;
        k = ring->cur;
        if (resvd > 0) {
                if (resvd + ring->avail >= lim + 1) {
                        D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
                        ring->reserved = resvd = 0; // XXX panic...
                }
                k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
        }
        if (j != k) {
                /* Userspace has released some packets. */
                for (n = 0; j != k; n++) {
                        struct netmap_slot *slot = &ring->slot[j];

                        slot->flags &= ~NS_BUF_CHANGED;
                        if (unlikely(j++ == lim))
                                j = 0;
                }
                kring->nr_hwavail -= n;
                kring->nr_hwcur = k;
        }
        /* Tell userspace that there are new packets. */
        ring->avail = kring->nr_hwavail - resvd;
        IFRATE(rate_ctx.new.rxsync++);

        return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
        struct ifnet *ifp = na->ifp;
        struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
        struct netmap_adapter *prev_na = gna->prev;

        if (prev_na != NULL) {
                D("Released generic NA %p", gna);
#if 0
                if_rele(na->ifp);
#endif
                netmap_adapter_put(prev_na);
        }
        if (ifp != NULL) {
                WNA(ifp) = prev_na;
                D("Restored native NA %p", prev_na);
                na->ifp = NULL;
        }
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
        struct netmap_adapter *na;
        struct netmap_generic_adapter *gna;
        int retval;
        u_int num_tx_desc, num_rx_desc;

        num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

        generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
        ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

        gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (gna == NULL) {
                D("no memory on attach, give up");
                return ENOMEM;
        }
        na = (struct netmap_adapter *)gna;
        na->ifp = ifp;
        na->num_tx_desc = num_tx_desc;
        na->num_rx_desc = num_rx_desc;
        na->nm_register = &generic_netmap_register;
        na->nm_txsync = &generic_netmap_txsync;
        na->nm_rxsync = &generic_netmap_rxsync;
        na->nm_dtor = &generic_netmap_dtor;
        /* when using generic, IFCAP_NETMAP is set so we force
         * NAF_SKIP_INTR to use the regular interrupt handler
         */
        na->na_flags = NAF_SKIP_INTR;

        ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
           ifp->num_tx_queues, ifp->real_num_tx_queues,
           ifp->tx_queue_len);
        ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
           ifp->num_rx_queues, ifp->real_num_rx_queues);

        generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

        retval = netmap_attach_common(na);
        if (retval) {
                kfree(gna, M_DEVBUF);
        }

        return retval;
}
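
/*
 * Usage sketch (illustrative only, not compiled and not part of this
 * file's API surface): a caller that wants netmap on an interface
 * lacking native support would fall back to this emulation layer
 * roughly as below. The helper name is hypothetical; only
 * generic_netmap_attach() is defined here.
 */
#if 0
static int
example_netmap_attach_fallback(struct ifnet *ifp)
{
        int error = 0;

        if (NA(ifp) == NULL) {          /* no adapter yet: use emulation */
                error = generic_netmap_attach(ifp);
                if (error)
                        D("generic attach failed on %s: %d",
                          ifp->if_xname, error);
        }
        return error;
}
#endif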