/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
 * $DragonFly: src/sys/net/dummynet/ip_dummynet.c,v 1.42 2007/11/05 13:26:08 sephe Exp $
 */
#include "opt_ipfw.h"    /* for IPFW2 definition */

#ifdef DUMMYNET_DEBUG
#define DPRINTF(fmt, ...)    kprintf(fmt, __VA_ARGS__)
#else
#define DPRINTF(fmt, ...)    ((void)0)
#endif
/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator
 * used in conjunction with the ipfw package.
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * Most important Changes:
 *
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: Large rewrite, use heaps to handle very many pipes.
 * 980513: Initial release
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systimer.h>
#include <sys/thread2.h>

#include <net/ethernet.h>
#include <net/route.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <net/ipfw/ip_fw.h>
#include <net/dummynet/ip_dummynet.h>
#ifndef DUMMYNET_CALLOUT_FREQ_MAX
#define DUMMYNET_CALLOUT_FREQ_MAX    10000
#endif
/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
 */
static dn_key curr_time = 0;           /* current simulation time */

static int dn_hash_size = 64;          /* default hash size */

/* statistics on number of queue searches and search steps */
static int searches, search_steps;
static int pipe_expire = 1;            /* expire queue if empty */
static int dn_max_ratio = 16;          /* max queues/buckets ratio */

static int red_lookup_depth = 256;     /* RED - default lookup table depth */
static int red_avg_pkt_size = 512;     /* RED - default medium packet size */
static int red_max_pkt_size = 1500;    /* RED - default max packet size */
/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *
 * wfq_ready_heap contains the pipes associated with WF2Q flows
 *
 * extract_heap contains pipes associated with delay lines.
 */
MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;
static int heap_init(struct dn_heap *h, int size);
static int heap_insert(struct dn_heap *h, dn_key key1, void *p);
static void heap_extract(struct dn_heap *h, void *obj);

static void transmit_event(struct dn_pipe *pipe);
static void ready_event(struct dn_flow_queue *q);

static int sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
static struct dn_pipe *all_pipes = NULL;         /* list of all pipes */
static struct dn_flow_set *all_flow_sets = NULL; /* list of all flow_sets */

static struct netmsg dn_netmsg;
static struct systimer dn_clock;
static int dn_hz = 1000;
static int dn_cpu = 0;    /* TODO tunable */
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW, 0, "Dummynet");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time,
    CTLFLAG_RD, &curr_time, 0, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
    CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
    CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
    CTLFLAG_RD, &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
    CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
    CTLFLAG_RW, &dn_max_ratio, 0,
    "Max ratio between dynamic queues and buckets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
static int config_pipe(struct dn_ioc_pipe *);
static int ip_dn_ctl(struct sockopt *sopt);

static void rt_unref(struct rtentry *);
static void dummynet_clock(systimer_t, struct intrframe *);
static void dummynet(struct netmsg *);
static void dummynet_flush(void);
static ip_dn_io_t dummynet_io;
static void dn_rule_delete(void *);

void dummynet_drain(void);    /* XXX unused */
static void
rt_unref(struct rtentry *rt)
{
    if (rt->rt_refcnt <= 0)
        kprintf("-- warning, refcnt now %ld, decreasing\n", rt->rt_refcnt);
    RTFREE(rt);
}
/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)     (((x) - 1) / 2)
#define HEAP_LEFT(x)       (2*(x) + 1)
#define HEAP_IS_LEFT(x)    ((x) & 1)
#define HEAP_RIGHT(x)      (2*(x) + 2)
#define HEAP_SWAP(a, b, buffer)    { buffer = a; a = b; b = buffer; }
#define HEAP_INCREMENT     15
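
/*
 * Illustrative examples (not part of the original code): with
 * HEAP_INCREMENT == 15, heap_init() rounds sizes up to multiples of 16,
 * e.g. (1 + 15) & ~15 == 16 and (17 + 15) & ~15 == 32. The index macros
 * give, for node 5: HEAP_FATHER(5) == 2, HEAP_LEFT(5) == 11,
 * HEAP_RIGHT(5) == 12, and HEAP_IS_LEFT(5) is nonzero.
 */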
static int
heap_init(struct dn_heap *h, int new_size)
{
    struct dn_heap_entry *p;

    if (h->size >= new_size) {
        kprintf("%s, Bogus call, have %d want %d\n", __func__,
            h->size, new_size);
        return 0;
    }

    new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
    p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
    if (h->size > 0) {    /* Copy existing data */
        bcopy(h->p, p, h->size * sizeof(*p));
        kfree(h->p, M_DUMMYNET);
    }
    h->p = p;
    h->size = new_size;
    return 0;
}
/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
    if (heap->offset > 0) \
        *((int *)((char *)(heap->p[node].object) + heap->offset)) = node;

/*
 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) \
    if (heap->offset > 0) \
        *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
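
/*
 * Illustrative sketch (not from the original source): an object that
 * must support heap_extract() from the middle of a heap embeds an int
 * slot for its current index and tells the heap its byte offset, so
 * SET_OFFSET/RESET_OFFSET can keep it current:
 *
 *    struct my_obj {              (hypothetical structure)
 *        dn_key deadline;
 *        int    heap_pos;         (maintained via heap->offset)
 *    };
 *    h.offset = __offsetof(struct my_obj, heap_pos);
 *
 * The pipe idle_heap below is set up exactly this way, using
 * __offsetof(struct dn_flow_queue, heap_pos).
 */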
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
    int son = h->elements;

    if (p == NULL) {    /* Data already there, set starting point */
        son = key1;
    } else {            /* Insert new element at the end, possibly resize */
        if (son == h->size) {    /* Need resize... */
            if (heap_init(h, h->elements + 1))
                return 1;    /* Failure... */
        }
        h->p[son].object = p;
        h->p[son].key = key1;
        h->elements++;
    }

    while (son > 0) {    /* Bubble up */
        int father = HEAP_FATHER(son);
        struct dn_heap_entry tmp;

        if (DN_KEY_LT(h->p[father].key, h->p[son].key))
            break;    /* Found right position */

        /* 'son' smaller than 'father', swap and repeat */
        HEAP_SWAP(h->p[son], h->p[father], tmp);
        SET_OFFSET(h, son);
        son = father;
    }
    SET_OFFSET(h, son);
    return 0;
}
/*
 * Remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
    int child, father, max = h->elements - 1;

    if (max < 0) {
        kprintf("warning, extract from empty heap %p\n", h);
        return;
    }

    father = 0;    /* Default: move up smallest child */
    if (obj != NULL) {    /* Extract specific element, index is at offset */
        if (h->offset <= 0)
            panic("%s from middle not supported on this heap!!!\n", __func__);
        father = *((int *)((char *)obj + h->offset));
        if (father < 0 || father >= h->elements) {
            panic("%s father %d out of bound 0..%d\n", __func__,
                father, h->elements);
        }
    }
    RESET_OFFSET(h, father);

    child = HEAP_LEFT(father);    /* Left child */
    while (child <= max) {        /* Valid entry */
        if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
            child = child + 1;    /* Take right child, otherwise left */
        h->p[father] = h->p[child];
        SET_OFFSET(h, father);
        father = child;
        child = HEAP_LEFT(child); /* Left child for next loop */
    }
    h->elements--;

    if (father != max) {
        /*
         * Fill hole with last entry and bubble up, reusing the insert code
         */
        h->p[father] = h->p[max];
        heap_insert(h, father, NULL);    /* This one cannot fail */
    }
}
/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
    int i;

    for (i = 0; i < h->elements; i++)
        heap_insert(h, i, NULL);
}
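
/*
 * Usage note (illustrative, not from the original source): heapify()
 * expects the entries to already sit in h->p[0..elements-1] in arbitrary
 * order, e.g. after a caller compacted the array by moving tail entries
 * into deleted slots; each heap_insert(h, i, NULL) then bubbles entry i
 * up into place. fs_remove_from_heap() below uses this pattern.
 */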
/*
 * Cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
    if (h->size > 0)
        kfree(h->p, M_DUMMYNET);
    bzero(h, sizeof(*h));
}

/*
 * --- End of heap management functions ---
 */
/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe)
{
    struct dn_pkt *pkt;

    while ((pkt = pipe->head) && DN_KEY_LEQ(pkt->output_time, curr_time)) {
        /*
         * First unlink, then call procedures, since ip_input() can invoke
         * ip_output() and vice versa, thus causing nested calls
         */
        pipe->head = pkt->dn_next;

        /*
         * 'pkt' should _not_ be touched after calling
         * ip_output(), ip_input(), ether_demux() and ether_output_frame()
         */
        switch (pkt->dn_dir) {
        case DN_TO_IP_OUT:
        {
            /*
             * 'pkt' will be freed in ip_output, so we keep
             * a reference of the 'rtentry' beforehand.
             */
            struct rtentry *rt = pkt->ro.ro_rt;

            ip_output(pkt->dn_m, NULL, NULL, 0, NULL, NULL);
            rt_unref(rt);
            break;
        }

        case DN_TO_IP_IN:
            ip_input(pkt->dn_m);
            break;

        case DN_TO_ETH_DEMUX:
        {
            struct mbuf *m = pkt->dn_m;
            struct ether_header *eh;

            if (m->m_len < ETHER_HDR_LEN &&
                (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
                kprintf("dummynet: pullup fail, dropping pkt\n");
                break;
            }
            /*
             * Same as ether_input, make eh be a pointer into the mbuf
             */
            eh = mtod(m, struct ether_header *);
            m_adj(m, ETHER_HDR_LEN);
            ether_demux(NULL, eh, m);
            break;
        }

        case DN_TO_ETH_OUT:
            ether_output_frame(pkt->ifp, pkt->dn_m);
            break;

        default:
            kprintf("dummynet: bad switch %d!\n", pkt->dn_dir);
            m_freem(pkt->dn_m);
            break;
        }
    }

    /*
     * If there are leftover packets, put into the heap for next event
     */
    if ((pkt = pipe->head)) {
        /*
         * XXX should check errors on heap_insert, by draining the
         * whole pipe and hoping in the future we are more successful
         */
        heap_insert(&extract_heap, pkt->output_time, pipe);
    }
}
/*
 * The following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */
#define SET_TICKS(pkt, q, p) \
    (pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1) / \
    p->bandwidth
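
/*
 * Worked example (illustrative only): with dn_hz = 1000, a 1500-byte
 * packet on a 1 Mbit/s pipe needs len*8*dn_hz = 12,000,000 credit units
 * while credit accrues at p->bandwidth = 1,000,000 units per tick, so
 * with (q)->numbytes == 0 the macro yields ceil(12000000/1000000) = 12
 * ticks, i.e. the 12 ms needed to serialize 12,000 bits at 1 Mbit/s.
 */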
/*
 * Extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
         struct dn_pipe *p, int len)
{
    q->head = pkt->dn_next;
    q->len--;
    q->len_bytes -= len;

    pkt->output_time = curr_time + p->delay;

    if (p->head == NULL)
        p->head = pkt;
    else
        p->tail->dn_next = pkt;
    p->tail = pkt;
    p->tail->dn_next = NULL;
}
/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q)
{
    struct dn_pkt *pkt;
    struct dn_pipe *p = q->fs->pipe;
    int p_was_empty;

    if (p == NULL) {
        kprintf("ready_event- pipe is gone\n");
        return;
    }
    p_was_empty = (p->head == NULL);

    /*
     * Schedule fixed-rate queues linked to this pipe:
     * Account for the bw accumulated since last scheduling, then
     * drain as many pkts as allowed by q->numbytes and move to
     * the delay line (in p) computing output time.
     * bandwidth==0 (no limit) means we can drain the whole queue,
     * setting len_scaled = 0 does the job.
     */
    q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
    while ((pkt = q->head) != NULL) {
        int len = pkt->dn_m->m_pkthdr.len;
        int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

        if (len_scaled > q->numbytes)
            break;
        q->numbytes -= len_scaled;
        move_pkt(pkt, q, p, len);
    }

    /*
     * If we have more packets queued, schedule next ready event
     * (can only occur when bandwidth != 0, otherwise we would have
     * flushed the whole queue in the previous loop).
     * To this purpose we record the current time and compute how many
     * ticks to go for the finish time of the packet.
     */
    if ((pkt = q->head) != NULL) {    /* This implies bandwidth != 0 */
        dn_key t = SET_TICKS(pkt, q, p);    /* ticks we have to wait */

        q->sched_time = curr_time;

        /*
         * XXX should check errors on heap_insert, and drain the whole
         * queue on error hoping next time we are luckier.
         */
        heap_insert(&ready_heap, curr_time + t, q);
    } else {    /* RED needs to know when the queue becomes empty */
        q->q_time = curr_time;
    }

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
        transmit_event(p);
}
/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
 * there is an additional delay.
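 *
 * Worked example (illustrative, not from the original source): if the
 * last packet drives p->numbytes to -4,000,000 on a 1,000,000 bit/s
 * pipe, the code below waits t = (bandwidth - 1 - numbytes)/bandwidth =
 * ceil(4000000/1000000) = 4 extra ticks (4 ms at dn_hz = 1000) before
 * the pipe earns back a non-negative credit.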
 */
static void
ready_event_wfq(struct dn_pipe *p)
{
    int p_was_empty = (p->head == NULL);
    struct dn_heap *sch = &p->scheduler_heap;
    struct dn_heap *neh = &p->not_eligible_heap;

    p->numbytes += (curr_time - p->sched_time) * p->bandwidth;

    /*
     * While we have backlogged traffic AND credit, we need to do
     * something on the queue.
     */
    while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
        if (sch->elements > 0) {    /* Have some eligible pkts to send out */
            struct dn_flow_queue *q = sch->p[0].object;
            struct dn_pkt *pkt = q->head;
            struct dn_flow_set *fs = q->fs;
            uint64_t len = pkt->dn_m->m_pkthdr.len;
            int len_scaled = p->bandwidth ? len*8*dn_hz : 0;

            heap_extract(sch, NULL);    /* Remove queue from heap */
            p->numbytes -= len_scaled;
            move_pkt(pkt, q, p, len);

            p->V += (len << MY_M) / p->sum;    /* Update V */
            q->S = q->F;    /* Update start time */

            if (q->len == 0) {    /* Flow not backlogged any more */
                fs->backlogged--;
                heap_insert(&p->idle_heap, q->F, q);
            } else {    /* Still backlogged */
                /*
                 * Update F and position in backlogged queue, then
                 * put flow in not_eligible_heap (we will fix this later).
                 */
                len = q->head->dn_m->m_pkthdr.len;
                q->F += (len << MY_M) / (uint64_t)fs->weight;
                if (DN_KEY_LEQ(q->S, p->V))
                    heap_insert(neh, q->S, q);
                else
                    heap_insert(sch, q->F, q);
            }
        }

        /*
         * Now compute V = max(V, min(S_i)). Remember that all elements in
         * sch have by definition S_i <= V so if sch is not empty, V is surely
         * the max and we must not update it. Conversely, if sch is empty
         * we only need to look at neh.
         */
        if (sch->elements == 0 && neh->elements > 0)
            p->V = MAX64(p->V, neh->p[0].key);

        /*
         * Move from neh to sch any packets that have become eligible
         */
        while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
            struct dn_flow_queue *q = neh->p[0].object;

            heap_extract(neh, NULL);
            heap_insert(sch, q->F, q);
        }
    }

    if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
        p->idle_heap.elements > 0) {
        /*
         * No traffic and no events scheduled. We can get rid of idle-heap.
         */
        int i;

        for (i = 0; i < p->idle_heap.elements; i++) {
            struct dn_flow_queue *q = p->idle_heap.p[i].object;

            q->F = 0;
            q->S = q->F + 1;
        }
        p->sum = 0;
        p->V = 0;
        p->idle_heap.elements = 0;
    }

    /*
     * If we are getting clocks from dummynet and if we are under credit,
     * schedule the next ready event.
     * Also fix the delivery time of the last packet.
     */
    if (p->numbytes < 0) {    /* This implies bandwidth > 0 */
        dn_key t = 0;    /* Number of ticks we have to wait */

        if (p->bandwidth > 0)
            t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
        p->tail->output_time += t;
        p->sched_time = curr_time;

        /*
         * XXX should check errors on heap_insert, and drain the whole
         * queue on error hoping next time we are luckier.
         */
        heap_insert(&wfq_ready_heap, curr_time + t, p);
    }

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty)
        transmit_event(p);
}
/*
 * This is called once per tick, or dn_hz times per second. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(struct netmsg *msg)
{
    void *p;
    struct dn_heap *h;
    struct dn_heap *heaps[3];
    struct dn_pipe *pe;
    int i;

    heaps[0] = &ready_heap;        /* Fixed-rate queues */
    heaps[1] = &wfq_ready_heap;    /* WF2Q queues */
    heaps[2] = &extract_heap;      /* Delay line */

    /* Reply the message, so the systimer can send the next tick */
    lwkt_replymsg(&msg->nm_lmsg, 0);

    curr_time++;
    for (i = 0; i < 3; i++) {
        h = heaps[i];
        while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
            if (h->p[0].key > curr_time) {
                kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
                    i, (int)(curr_time - h->p[0].key));
            }

            p = h->p[0].object;     /* Store a copy before heap_extract */
            heap_extract(h, NULL);  /* Need to extract before processing */

            if (i == 0)
                ready_event(p);
            else if (i == 1)
                ready_event_wfq(p);
            else
                transmit_event(p);
        }
    }

    /*
     * Sweep pipes trying to expire idle flow_queues
     */
    for (pe = all_pipes; pe; pe = pe->next) {
        if (pe->idle_heap.elements > 0 &&
            DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
            struct dn_flow_queue *q = pe->idle_heap.p[0].object;

            heap_extract(&pe->idle_heap, NULL);
            q->S = q->F + 1;    /* Mark timestamp as invalid */
            pe->sum -= q->fs->weight;
        }
    }
}
/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
    struct dn_flow_queue *q, *prev;
    int i, initial_elements = fs->rq_elements;

    if (fs->last_expired == time_second)
        return 0;

    fs->last_expired = time_second;

    for (i = 0; i <= fs->rq_size; i++) {    /* Last one is overflow */
        for (prev = NULL, q = fs->rq[i]; q != NULL;) {
            if (q->head != NULL || q->S != q->F + 1) {
                prev = q;
                q = q->next;
            } else {    /* Entry is idle, expire it */
                struct dn_flow_queue *old_q = q;

                if (prev != NULL)
                    prev->next = q = q->next;
                else
                    fs->rq[i] = q = q->next;
                fs->rq_elements--;
                kfree(old_q, M_DUMMYNET);
            }
        }
    }
    return initial_elements - fs->rq_elements;
}
/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
    struct dn_flow_queue *q;

    if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
        expire_queues(fs) == 0) {
        /*
         * No way to get room, use or create overflow queue.
         */
        i = fs->rq_size;
        if (fs->rq[i] != NULL)
            return fs->rq[i];
    }

    q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
    if (q == NULL)
        return NULL;

    q->fs = fs;
    q->hash_slot = i;
    q->next = fs->rq[i];
    q->S = q->F + 1;    /* hack - mark timestamp as invalid */
    fs->rq[i] = q;
    fs->rq_elements++;
    return q;
}
/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
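 *
 * Illustrative example (not from the original source): with a flow mask
 * whose dst_ip is 0xffffff00 and every other field 0, all packets to the
 * same /24 are masked to the same flow id and end up sharing a single
 * dn_flow_queue; a full 0xffffffff dst_ip mask would instead give every
 * destination host its own queue (and its own share of the pipe).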
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
{
    struct dn_flow_queue *q, *prev;
    int i = 0;

    if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
        q = fs->rq[0];
    } else {
        /* First, do the masking */
        id->dst_ip &= fs->flow_mask.dst_ip;
        id->src_ip &= fs->flow_mask.src_ip;
        id->dst_port &= fs->flow_mask.dst_port;
        id->src_port &= fs->flow_mask.src_port;
        id->proto &= fs->flow_mask.proto;
        id->flags = 0;    /* we don't care about this one */

        /* Then, hash function */
        i = ((id->dst_ip) & 0xffff) ^
            ((id->dst_ip >> 15) & 0xffff) ^
            ((id->src_ip << 1) & 0xffff) ^
            ((id->src_ip >> 16) & 0xffff) ^
            (id->dst_port << 1) ^ (id->src_port) ^
            (id->proto);
        i = i % fs->rq_size;

        /* Finally, scan the current list for a match */
        searches++;
        for (prev = NULL, q = fs->rq[i]; q;) {
            search_steps++;
            if (id->dst_ip == q->id.dst_ip &&
                id->src_ip == q->id.src_ip &&
                id->dst_port == q->id.dst_port &&
                id->src_port == q->id.src_port &&
                id->proto == q->id.proto &&
                id->flags == q->id.flags) {
                break;    /* Found */
            } else if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
                /* Entry is idle and not in any heap, expire it */
                struct dn_flow_queue *old_q = q;

                if (prev != NULL)
                    prev->next = q = q->next;
                else
                    fs->rq[i] = q = q->next;
                fs->rq_elements--;
                kfree(old_q, M_DUMMYNET);
                continue;
            }
            prev = q;
            q = q->next;
        }

        if (q && prev != NULL) {    /* Found and not in front */
            prev->next = q->next;
            q->next = fs->rq[i];
            fs->rq[i] = q;
        }
    }

    if (q == NULL) {    /* No match, need to allocate a new entry */
        q = create_queue(fs, i);
        if (q != NULL)
            q->id = *id;
    }
    return q;
}
static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
    /*
     * RED calculates the average queue size (avg) using a low-pass filter
     * with an exponential weighted (w_q) moving average:
     *     avg <- (1-w_q) * avg + w_q * q_size
     * where q_size is the queue length (measured in bytes or packets).
     *
     * If q_size == 0, we compute the idle time for the link, and set
     *     avg = (1 - w_q)^(idle/s)
     * where s is the time needed for transmitting a medium-sized packet.
     *
     * Now, if avg < min_th the packet is enqueued.
     * If avg > max_th the packet is dropped. Otherwise, the packet is
     * dropped with probability P function of avg.
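     *
     * Worked example (illustrative, not from the original source): with
     * w_q = 0.002, a current avg of 10 packets and an instantaneous
     * q_size of 20 packets, the filter moves avg only slightly:
     *     avg <- 0.998 * 10 + 0.002 * 20 = 10.02
     * so short bursts barely affect the drop probability. The code below
     * does this in fixed point via SCALE/SCALE_MUL.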
     */
    int64_t p_b = 0;
    u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

    DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);

    /* Average queue size estimation */
    if (q_size != 0) {
        /*
         * Queue is not empty, avg <- avg + (q_size - avg) * w_q
         */
        int diff = SCALE(q_size) - q->avg;
        int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

        q->avg += (int)v;
    } else {
        /*
         * Queue is empty, find for how long the queue has been
         * empty and use a lookup table for computing
         * (1 - w_q)^(idle_time/s) where s is the time to send a
         * (small) packet.
         */
        if (q->avg) {
            u_int t = (curr_time - q->q_time) / fs->lookup_step;

            q->avg = (t < fs->lookup_depth) ?
                SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
        }
    }
    DPRINTF("avg: %u ", SCALE_VAL(q->avg));

    /* Should we drop? */
    if (q->avg < fs->min_th) {
        q->count = -1;
        return 0;    /* Accept packet */
    }
    if (q->avg >= fs->max_th) {    /* Average queue >= Max threshold */
        if (fs->flags_fs & DN_IS_GENTLE_RED) {
            /*
             * According to Gentle-RED, if avg is greater than max_th the
             * packet is dropped with a probability
             *     p_b = c_3 * avg - c_4
             * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
             */
            p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
        } else {
            q->count = -1;
            DPRINTF("%s", "- drop");
            return 1;
        }
    } else if (q->avg > fs->min_th) {
        /*
         * We compute p_b using the linear dropping function p_b = c_1 *
         * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
         * max_p * min_th / (max_th - min_th)
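         *
         * Worked example (illustrative only): with max_p = 0.1,
         * min_th = 10 and max_th = 30, c_1 = 0.005 and c_2 = 0.05,
         * so at avg = 20 the base drop probability is
         *     p_b = 0.005 * 20 - 0.05 = 0.05
         * i.e. halfway between the thresholds we drop at max_p / 2.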
         */
        p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES)
        p_b = (p_b * len) / fs->max_pkt_size;

    if (++q->count == 0) {
        q->random = krandom() & 0xffff;
    } else {
        /*
         * q->count counts packets arrived since last drop, so a greater
         * value of q->count means a greater packet drop probability.
         */
        if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
            q->count = 0;
            DPRINTF("%s", "- red drop");
            /* After a drop we calculate a new random value */
            q->random = krandom() & 0xffff;
            return 1;    /* Drop */
        }
    }
    /* End of RED algorithm */
    return 0;    /* Accept */
}
static __inline struct dn_flow_set *
locate_flowset(int pipe_nr, struct ip_fw *rule)
{
    ipfw_insn *cmd = rule->cmd + rule->act_ofs;
    struct dn_flow_set *fs;

    if (cmd->opcode == O_LOG)
        cmd += F_LEN(cmd);

    fs = ((ipfw_insn_pipe *)cmd)->pipe_ptr;
    if (fs != NULL)
        return fs;

    if (cmd->opcode == O_QUEUE) {
        for (fs = all_flow_sets; fs && fs->fs_nr != pipe_nr; fs = fs->next)
            ;    /* EMPTY */
    } else {
        struct dn_pipe *p;

        for (p = all_pipes; p && p->pipe_nr != pipe_nr; p = p->next)
            ;    /* EMPTY */
        if (p != NULL)
            fs = &p->fs;
    }

    /* record for the future */
    ((ipfw_insn_pipe *)cmd)->pipe_ptr = fs;
    return fs;
}
/*
 * Dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * pipe_nr    pipe or queue the packet is destined for.
 * dir        where shall we send the packet after dummynet.
 * m          the mbuf with the packet
 * fwa->oif   the 'ifp' parameter from the caller.
 *            NULL in ip_input, destination interface in ip_output
 * fwa->ro    route parameter (only used in ip_output, NULL otherwise)
 * fwa->dst   destination address, only used by ip_output
 * fwa->rule  matching rule, in case of multiple passes
 * fwa->flags flags from the caller, only used in ip_output
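 *
 * Illustrative call sketch (an assumption about the caller, not code in
 * this file): ipfw hands matched O_PIPE/O_QUEUE traffic to dummynet
 * through the ip_dn_io_ptr hook installed at init time, roughly as
 *     ip_dn_io_ptr(m, pipe_nr, DN_TO_IP_OUT, &args);
 * dummynet takes ownership of the mbuf in either outcome; the return
 * value is 0 or ENOBUFS (see the dropit path below).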
 */
static int
dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
    struct dn_pkt *pkt;
    struct m_tag *tag;
    struct dn_flow_set *fs;
    struct dn_pipe *pipe;
    uint64_t len = m->m_pkthdr.len;
    struct dn_flow_queue *q = NULL;
    ipfw_insn *cmd;
    int is_pipe;

    cmd = fwa->rule->cmd + fwa->rule->act_ofs;
    if (cmd->opcode == O_LOG)
        cmd += F_LEN(cmd);

    KASSERT(cmd->opcode == O_PIPE || cmd->opcode == O_QUEUE,
        ("Rule is not PIPE or QUEUE, opcode %d\n", cmd->opcode));

    is_pipe = (cmd->opcode == O_PIPE);

    /*
     * This is a dummynet rule, so we expect a O_PIPE or O_QUEUE rule
     */
    fs = locate_flowset(pipe_nr, fwa->rule);
    if (fs == NULL)
        goto dropit;    /* This queue/pipe does not exist! */

    pipe = fs->pipe;
    if (pipe == NULL) {    /* Must be a queue, try find a matching pipe */
        for (pipe = all_pipes; pipe && pipe->pipe_nr != fs->parent_nr;
             pipe = pipe->next)
            ;    /* EMPTY */
        if (pipe != NULL) {
            fs->pipe = pipe;
        } else {
            kprintf("No pipe %d for queue %d, drop pkt\n",
                fs->parent_nr, fs->fs_nr);
            goto dropit;
        }
    }

    q = find_queue(fs, &fwa->f_id);
    if (q == NULL)
        goto dropit;    /* Cannot allocate queue */

    /*
     * Update statistics, then check reasons to drop pkt
     */
    q->tot_bytes += len;
    q->tot_pkts++;

    if (fs->plr && krandom() < fs->plr)
        goto dropit;    /* Random pkt drop */

    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
        if (q->len_bytes > fs->qsize)
            goto dropit;    /* Queue size overflow */
    } else {
        if (q->len >= fs->qsize)
            goto dropit;    /* Queue count overflow */
    }

    if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
        goto dropit;

    /*
     * Build and enqueue packet + parameters
     */
    tag = m_tag_get(PACKET_TAG_DUMMYNET, sizeof(*pkt), MB_DONTWAIT /* XXX */);
    if (tag == NULL)
        goto dropit;
    m_tag_prepend(m, tag);

    pkt = m_tag_data(tag);
    bzero(pkt, sizeof(*pkt));    /* XXX expensive to zero */

    pkt->rule = fwa->rule;
    pkt->dn_next = NULL;
    pkt->dn_m = m;
    pkt->dn_dir = dir;

    pkt->ifp = fwa->oif;
    if (dir == DN_TO_IP_OUT) {
        /*
         * We need to copy *ro because for ICMP pkts (and maybe others)
         * the caller passed a pointer into the stack; dst might also be
         * a pointer into *ro so it needs to be updated.
         */
        pkt->ro = *(fwa->ro);
        if (fwa->ro->ro_rt)
            fwa->ro->ro_rt->rt_refcnt++;
        if (fwa->dst == (struct sockaddr_in *)&fwa->ro->ro_dst) {
            /* 'dst' points into 'ro' */
            fwa->dst = (struct sockaddr_in *)&(pkt->ro.ro_dst);
        }

        pkt->dn_dst = fwa->dst;
        pkt->flags = fwa->flags;
    }
    if (q->head == NULL)
        q->head = pkt;
    else
        q->tail->dn_next = pkt;
    q->tail = pkt;
    q->len++;
    q->len_bytes += len;

    if (q->head != pkt)    /* Flow was not idle, we are done */
        goto done;

    /*
     * If we reach this point the flow was previously idle, so we need
     * to schedule it. This involves different actions for fixed-rate
     * or WF2Q queues.
     */
    if (is_pipe) {
        /*
         * Fixed-rate queue: just insert into the ready_heap.
         */
        dn_key t = 0;

        if (pipe->bandwidth)
            t = SET_TICKS(pkt, q, pipe);

        q->sched_time = curr_time;
        if (t == 0)    /* Must process it now */
            ready_event(q);
        else
            heap_insert(&ready_heap, curr_time + t, q);
    } else {
        /*
         * WF2Q:
         * First, compute start time S: if the flow was idle (S=F+1)
         * set S to the virtual time V for the controlling pipe, and update
         * the sum of weights for the pipe; otherwise, remove flow from
         * idle_heap and set S to max(F, V).
         * Second, compute finish time F = S + len/weight.
         * Third, if pipe was idle, update V = max(S, V).
         * Fourth, count one more backlogged flow.
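         *
         * Illustrative example (not from the original source): two
         * backlogged flows with weights 3 and 1 each advance F by
         * len/3 and len/1 per packet of size len, so serving eligible
         * flows in increasing F order gives them a 3:1 bandwidth split.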
         */
        if (DN_KEY_GT(q->S, q->F)) {    /* Means timestamps are invalid */
            q->S = pipe->V;
            pipe->sum += fs->weight;    /* Add weight of new queue */
        } else {
            heap_extract(&pipe->idle_heap, q);
            q->S = MAX64(q->F, pipe->V);
        }
        q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;

        if (pipe->not_eligible_heap.elements == 0 &&
            pipe->scheduler_heap.elements == 0)
            pipe->V = MAX64(q->S, pipe->V);
        fs->backlogged++;

        /*
         * Look at eligibility. A flow is not eligible if S>V (when
         * this happens, it means that there is some other flow already
         * scheduled for the same pipe, so the scheduler_heap cannot be
         * empty). If the flow is not eligible we just store it in the
         * not_eligible_heap. Otherwise, we store in the scheduler_heap
         * and possibly invoke ready_event_wfq() right now if there is
         * leftover credit.
         * Note that for all flows in scheduler_heap (SCH), S_i <= V,
         * and for all flows in not_eligible_heap (NEH), S_i > V.
         * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
         * we only need to look into NEH.
         */
        if (DN_KEY_GT(q->S, pipe->V)) {    /* Not eligible */
            if (pipe->scheduler_heap.elements == 0)
                kprintf("++ ouch! not eligible but empty scheduler!\n");
            heap_insert(&pipe->not_eligible_heap, q->S, q);
        } else {
            heap_insert(&pipe->scheduler_heap, q->F, q);
            if (pipe->numbytes >= 0) {    /* Pipe is idle */
                if (pipe->scheduler_heap.elements != 1)
                    kprintf("*** OUCH! pipe should have been idle!\n");
                DPRINTF("Waking up pipe %d at %d\n",
                    pipe->pipe_nr, (int)(q->F >> MY_M));
                pipe->sched_time = curr_time;
                ready_event_wfq(pipe);
            }
        }
    }
done:
    return 0;

dropit:
    if (q)
        q->drops++;
    m_freem(m);
    return ((fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
}
/*
 * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT)
 * Doing this would probably save us the initial bzero of dn_pkt
 */
#define DN_FREE_PKT(pkt) { \
    struct dn_pkt *n = pkt; \
    rt_unref(n->ro.ro_rt); \
    m_freem(n->dn_m); \
    pkt = n->dn_next; \
}
/*
 * Dispose all packets and flow_queues on a flow_set.
 * If all=1, also remove red lookup table and other storage,
 * including the descriptor itself.
 * For the one in dn_pipe MUST also cleanup ready_heap...
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
    int i;

    for (i = 0; i <= fs->rq_size; i++) {
        struct dn_flow_queue *q, *qn;

        for (q = fs->rq[i]; q; q = qn) {
            struct dn_pkt *pkt;

            for (pkt = q->head; pkt;)
                DN_FREE_PKT(pkt);
            qn = q->next;
            kfree(q, M_DUMMYNET);
        }
        fs->rq[i] = NULL;
    }
    fs->rq_elements = 0;

    if (all) {
        /* RED - free lookup table */
        if (fs->w_q_lookup)
            kfree(fs->w_q_lookup, M_DUMMYNET);

        if (fs->rq)
            kfree(fs->rq, M_DUMMYNET);

        /* If this fs is not part of a pipe, free it */
        if (fs->pipe && fs != &fs->pipe->fs)
            kfree(fs, M_DUMMYNET);
    }
}
/*
 * Dispose all packets queued on a pipe (not a flow_set).
 * Also free all resources associated to a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
    struct dn_pkt *pkt;

    purge_flow_set(&pipe->fs, 1);

    for (pkt = pipe->head; pkt;)
        DN_FREE_PKT(pkt);

    heap_free(&pipe->scheduler_heap);
    heap_free(&pipe->not_eligible_heap);
    heap_free(&pipe->idle_heap);
}
/*
 * Delete all pipes and heaps returning memory. Must also
 * remove references from all ipfw rules to all pipes.
 */
static void
dummynet_flush(void)
{
    struct dn_flow_set *fs;
    struct dn_pipe *p;

    /* Remove all references to pipes ... */
    flush_pipe_ptrs(NULL);

    /* Prevent future matches... */
    p = all_pipes;
    all_pipes = NULL;
    fs = all_flow_sets;
    all_flow_sets = NULL;

    /* Free heaps so we don't have unwanted events */
    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);

    /*
     * Now purge all queued pkts and delete all pipes
     */
    /* Scan and purge all flow_sets. */
    while (fs != NULL) {
        struct dn_flow_set *curr_fs = fs;

        fs = curr_fs->next;
        purge_flow_set(curr_fs, 1);
    }
    while (p != NULL) {
        struct dn_pipe *curr_p = p;

        p = curr_p->next;
        purge_pipe(curr_p);
        kfree(curr_p, M_DUMMYNET);
    }
}
extern struct ip_fw *ip_fw_default_rule;

static void
dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
{
    int i;

    for (i = 0; i <= fs->rq_size; i++) {    /* Last one is ovflow */
        struct dn_flow_queue *q;

        for (q = fs->rq[i]; q; q = q->next) {
            struct dn_pkt *pkt;

            for (pkt = q->head; pkt; pkt = pkt->dn_next) {
                if (pkt->rule == r)
                    pkt->rule = ip_fw_default_rule;
            }
        }
    }
}
/*
 * When a firewall rule is deleted, scan all queues and remove the flow-id
 * from packets matching this rule.
 */
static void
dn_rule_delete(void *r)
{
    struct dn_pipe *p;
    struct dn_flow_set *fs;

    /*
     * If the rule references a queue (dn_flow_set), then scan
     * the flow set, otherwise scan pipes. Should do either, but doing
     * both does not harm.
     */
    for (fs = all_flow_sets; fs; fs = fs->next)
        dn_rule_delete_fs(fs, r);

    for (p = all_pipes; p; p = p->next) {
        struct dn_pkt *pkt;

        fs = &p->fs;
        dn_rule_delete_fs(fs, r);

        for (pkt = p->head; pkt; pkt = pkt->dn_next) {
            if (pkt->rule == r)
                pkt->rule = ip_fw_default_rule;
        }
    }
}
/*
 * Setup RED parameters
 */
static int
config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
{
    int i;

    x->w_q = ioc_fs->w_q;
    x->min_th = SCALE(ioc_fs->min_th);
    x->max_th = SCALE(ioc_fs->max_th);
    x->max_p = ioc_fs->max_p;

    x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
    x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
    if (x->flags_fs & DN_IS_GENTLE_RED) {
        x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
        x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
    }

    /* If the lookup table already exist, free and create it again */
    if (x->w_q_lookup) {
        kfree(x->w_q_lookup, M_DUMMYNET);
        x->w_q_lookup = NULL;
    }

    if (red_lookup_depth == 0) {
        kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
        kfree(x, M_DUMMYNET);
        return EINVAL;
    }

    x->lookup_depth = red_lookup_depth;
    x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
        M_DUMMYNET, M_WAITOK);

    /* Fill the lookup table with (1 - w_q)^x */
    x->lookup_step = ioc_fs->lookup_step;
    x->lookup_weight = ioc_fs->lookup_weight;
    x->w_q_lookup[0] = SCALE(1) - x->w_q;
    for (i = 1; i < x->lookup_depth; i++)
        x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);

    if (red_avg_pkt_size < 1)
        red_avg_pkt_size = 512;
    x->avg_pkt_size = red_avg_pkt_size;

    if (red_max_pkt_size < 1)
        red_max_pkt_size = 1500;
    x->max_pkt_size = red_max_pkt_size;

    return 0;
}
static void
alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
    if (x->flags_fs & DN_HAVE_FLOW_MASK) {
        int l = ioc_fs->rq_size;

        /* Allocate some slots */
        if (l == 0)
            l = dn_hash_size;

        if (l < DN_MIN_HASH_SIZE)
            l = DN_MIN_HASH_SIZE;
        else if (l > DN_MAX_HASH_SIZE)
            l = DN_MAX_HASH_SIZE;

        x->rq_size = l;
    } else {
        /* One is enough for null mask */
        x->rq_size = 1;
    }
    x->rq = kmalloc((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
        M_DUMMYNET, M_WAITOK | M_ZERO);
    x->rq_elements = 0;
}
static void
set_flowid_parms(struct ipfw_flow_id *id, const struct dn_ioc_flowid *ioc_id)
{
    id->dst_ip = ioc_id->u.ip.dst_ip;
    id->src_ip = ioc_id->u.ip.src_ip;
    id->dst_port = ioc_id->u.ip.dst_port;
    id->src_port = ioc_id->u.ip.src_port;
    id->proto = ioc_id->u.ip.proto;
    id->flags = ioc_id->u.ip.flags;
}

static void
set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
{
    x->flags_fs = ioc_fs->flags_fs;
    x->qsize = ioc_fs->qsize;
    x->plr = ioc_fs->plr;
    set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
    if (x->flags_fs & DN_QSIZE_IS_BYTES) {
        if (x->qsize > 1024 * 1024)
            x->qsize = 1024 * 1024;
    } else {
        if (x->qsize == 0 || x->qsize > 100)
            x->qsize = 50;
    }

    /* Configuring RED */
    if (x->flags_fs & DN_IS_RED)
        config_red(ioc_fs, x);    /* XXX should check errors */
}
/*
 * Setup pipe or queue parameters.
 */
static int
config_pipe(struct dn_ioc_pipe *ioc_pipe)
{
    struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;

    /*
     * The config program passes parameters as follows:
     * bw      bits/second (0 means no limits)
     * delay   ms (must be translated into ticks)
     * qsize   slots or bytes
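     *
     * Worked example (illustrative only): with dn_hz = 1000, a request
     * for a 30 ms delay is stored below as (30 * 1000) / 1000 = 30 ticks;
     * dummynet_get() converts it back to ms on the way out.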
     */
    ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;

    /*
     * We need either a pipe number or a flow_set number
     */
    if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
        return EINVAL;
    if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
        return EINVAL;

    if (ioc_pipe->pipe_nr != 0) {    /* This is a pipe */
        struct dn_pipe *x, *a, *b;

        /* Locate pipe */
        for (a = NULL, b = all_pipes; b && b->pipe_nr < ioc_pipe->pipe_nr;
             a = b, b = b->next)
            ;    /* EMPTY */

        if (b == NULL || b->pipe_nr != ioc_pipe->pipe_nr) {    /* New pipe */
            x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
            x->pipe_nr = ioc_pipe->pipe_nr;
            x->fs.pipe = x;

            /*
             * idle_heap is the only one from which we extract from the middle.
             */
            x->idle_heap.size = x->idle_heap.elements = 0;
            x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
        } else {
            int i;

            x = b;

            /* Flush accumulated credit for all queues */
            for (i = 0; i <= x->fs.rq_size; i++) {
                struct dn_flow_queue *q;

                for (q = x->fs.rq[i]; q; q = q->next)
                    q->numbytes = 0;
            }
        }

        x->bandwidth = ioc_pipe->bandwidth;
        x->numbytes = 0;    /* Just in case... */
        x->delay = ioc_pipe->delay;

        set_fs_parms(&x->fs, ioc_fs);

        if (x->fs.rq == NULL) {    /* A new pipe */
            alloc_hash(&x->fs, ioc_fs);

            x->next = b;
            if (a == NULL)
                all_pipes = x;
            else
                a->next = x;
        }
    } else {    /* Config flow_set */
        struct dn_flow_set *x, *a, *b;

        /* Locate flow_set */
        for (a = NULL, b = all_flow_sets; b && b->fs_nr < ioc_fs->fs_nr;
             a = b, b = b->next)
            ;    /* EMPTY */

        if (b == NULL || b->fs_nr != ioc_fs->fs_nr) {    /* New flow_set */
            if (ioc_fs->parent_nr == 0)    /* Need link to a pipe */
                return EINVAL;

            x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
                M_WAITOK | M_ZERO);
            x->fs_nr = ioc_fs->fs_nr;
            x->parent_nr = ioc_fs->parent_nr;
            x->weight = ioc_fs->weight;
            if (x->weight == 0)
                x->weight = 1;
            else if (x->weight > 100)
                x->weight = 100;
        } else {
            /* Change parent pipe not allowed; must delete and recreate */
            if (ioc_fs->parent_nr != 0 && b->parent_nr != ioc_fs->parent_nr)
                return EINVAL;
            x = b;
        }

        set_fs_parms(x, ioc_fs);

        if (x->rq == NULL) {    /* A new flow_set */
            alloc_hash(x, ioc_fs);

            x->next = b;
            if (a == NULL)
                all_flow_sets = x;
            else
                a->next = x;
        }
    }
    return 0;
}
/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
    int i = 0, found = 0;

    while (i < h->elements) {
        if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
            h->elements--;
            h->p[i] = h->p[h->elements];
            found++;
        } else {
            i++;
        }
    }
    if (found)
        heapify(h);
}
/*
 * Helper function to remove a pipe from a heap (can be there at most once)
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
    if (h->elements > 0) {
        int i;

        for (i = 0; i < h->elements; i++) {
            if (h->p[i].object == p) {    /* Found it */
                h->elements--;
                h->p[i] = h->p[h->elements];
                heapify(h);
                break;
            }
        }
    }
}
/*
 * Drain all queues. Called in case of severe mbuf shortage.
 */
void
dummynet_drain(void)
{
    struct dn_flow_set *fs;
    struct dn_pipe *p;
    struct dn_pkt *pkt;

    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);

    /* Remove all references to this pipe from flow_sets */
    for (fs = all_flow_sets; fs; fs = fs->next)
        purge_flow_set(fs, 0);

    for (p = all_pipes; p; p = p->next) {
        purge_flow_set(&p->fs, 0);
        for (pkt = p->head; pkt;)
            DN_FREE_PKT(pkt);
        p->head = p->tail = NULL;
    }
}
/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
{
    int error = 0;

    if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
        return EINVAL;
    if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
        return EINVAL;

    if (ioc_pipe->pipe_nr != 0) {    /* This is an old-style pipe */
        struct dn_pipe *a, *b;
        struct dn_flow_set *fs;

        /* Locate pipe */
        for (a = NULL, b = all_pipes; b && b->pipe_nr < ioc_pipe->pipe_nr;
             a = b, b = b->next)
            ;    /* EMPTY */
        if (b == NULL || b->pipe_nr != ioc_pipe->pipe_nr) {
            error = EINVAL;
            goto back;    /* Not found */
        }

        /* Unlink from list of pipes */
        if (a == NULL)
            all_pipes = b->next;
        else
            a->next = b->next;

        /* Remove references to this pipe from the ip_fw rules. */
        flush_pipe_ptrs(&b->fs);

        /* Remove all references to this pipe from flow_sets */
        for (fs = all_flow_sets; fs; fs = fs->next) {
            if (fs->pipe == b) {
                kprintf("++ ref to pipe %d from fs %d\n",
                    ioc_pipe->pipe_nr, fs->fs_nr);
                fs->pipe = NULL;
                purge_flow_set(fs, 0);
            }
        }

        fs_remove_from_heap(&ready_heap, &b->fs);
        purge_pipe(b);    /* Remove all data associated to this pipe */

        /* Remove reference to here from extract_heap and wfq_ready_heap */
        pipe_remove_from_heap(&extract_heap, b);
        pipe_remove_from_heap(&wfq_ready_heap, b);

        kfree(b, M_DUMMYNET);
    } else {    /* This is a WF2Q queue (dn_flow_set) */
        struct dn_flow_set *a, *b;

        /* Locate flow_set */
        for (a = NULL, b = all_flow_sets; b && b->fs_nr < ioc_pipe->fs.fs_nr;
             a = b, b = b->next)
            ;    /* EMPTY */
        if (b == NULL || b->fs_nr != ioc_pipe->fs.fs_nr) {
            error = EINVAL;
            goto back;    /* Not found */
        }

        if (a == NULL)
            all_flow_sets = b->next;
        else
            a->next = b->next;

        /* Remove references to this flow_set from the ip_fw rules. */
        flush_pipe_ptrs(b);

        if (b->pipe != NULL) {
            /* Update total weight on parent pipe and cleanup parent heaps */
            b->pipe->sum -= b->weight * b->backlogged;
            fs_remove_from_heap(&b->pipe->not_eligible_heap, b);
            fs_remove_from_heap(&b->pipe->scheduler_heap, b);
#if 1    /* XXX should i remove from idle_heap as well ? */
            fs_remove_from_heap(&b->pipe->idle_heap, b);
#endif
        }
        purge_flow_set(b, 1);
    }
back:
    return error;
}
/*
 * Helper function used to copy data from kernel in DUMMYNET_GET
 */
static void
dn_copy_flowid(const struct ipfw_flow_id *id, struct dn_ioc_flowid *ioc_id)
{
    ioc_id->type = ETHERTYPE_IP;
    ioc_id->u.ip.dst_ip = id->dst_ip;
    ioc_id->u.ip.src_ip = id->src_ip;
    ioc_id->u.ip.dst_port = id->dst_port;
    ioc_id->u.ip.src_port = id->src_port;
    ioc_id->u.ip.proto = id->proto;
    ioc_id->u.ip.flags = id->flags;
}
static void *
dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
{
    const struct dn_flow_queue *q;
    struct dn_ioc_flowqueue *ioc_fq = bp;
    int i, copied = 0;

    for (i = 0; i <= fs->rq_size; i++) {
        for (q = fs->rq[i]; q; q = q->next, ioc_fq++) {
            if (q->hash_slot != i) {    /* XXX ASSERT */
                kprintf("++ at %d: wrong slot (have %d, "
                    "should be %d)\n", copied, q->hash_slot, i);
            }
            if (q->fs != fs) {    /* XXX ASSERT */
                kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
                    i, q->fs, fs);
            }

            copied++;

            ioc_fq->len = q->len;
            ioc_fq->len_bytes = q->len_bytes;
            ioc_fq->tot_pkts = q->tot_pkts;
            ioc_fq->tot_bytes = q->tot_bytes;
            ioc_fq->drops = q->drops;
            ioc_fq->hash_slot = q->hash_slot;

            dn_copy_flowid(&q->id, &ioc_fq->id);
        }
    }

    if (copied != fs->rq_elements) {    /* XXX ASSERT */
        kprintf("++ wrong count, have %d should be %d\n",
            copied, fs->rq_elements);
    }
    return ioc_fq;
}
static void
dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
    u_short fs_type)
{
    ioc_fs->fs_type = fs_type;

    ioc_fs->fs_nr = fs->fs_nr;
    ioc_fs->flags_fs = fs->flags_fs;
    ioc_fs->parent_nr = fs->parent_nr;

    ioc_fs->weight = fs->weight;
    ioc_fs->qsize = fs->qsize;
    ioc_fs->plr = fs->plr;

    ioc_fs->rq_size = fs->rq_size;
    ioc_fs->rq_elements = fs->rq_elements;

    ioc_fs->w_q = fs->w_q;
    ioc_fs->max_th = fs->max_th;
    ioc_fs->min_th = fs->min_th;
    ioc_fs->max_p = fs->max_p;

    dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
}
static int
dummynet_get(struct sockopt *sopt)
{
    struct dn_flow_set *fs;
    struct dn_pipe *pipe;
    char *buf, *bp;
    size_t size = 0;
    int error;

    /*
     * Compute size of data structures: list of pipes and flow_sets.
     */
    for (pipe = all_pipes; pipe; pipe = pipe->next) {
        size += sizeof(struct dn_ioc_pipe) +
            pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
    }

    for (fs = all_flow_sets; fs; fs = fs->next) {
        size += sizeof(struct dn_ioc_flowset) +
            fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
    }

    bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);

    for (pipe = all_pipes; pipe; pipe = pipe->next) {
        struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)bp;

        /*
         * Copy flow set descriptor associated with this pipe
         */
        dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);

        /*
         * Copy pipe descriptor
         */
        ioc_pipe->bandwidth = pipe->bandwidth;
        ioc_pipe->pipe_nr = pipe->pipe_nr;
        ioc_pipe->V = pipe->V;
        /* Convert delay to milliseconds */
        ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;

        /*
         * Copy flow queue descriptors
         */
        bp += sizeof(*ioc_pipe);
        bp = dn_copy_flowqueues(&pipe->fs, bp);
    }

    for (fs = all_flow_sets; fs; fs = fs->next) {
        struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)bp;

        /*
         * Copy flow set descriptor
         */
        dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);

        /*
         * Copy flow queue descriptors
         */
        bp += sizeof(*ioc_fs);
        bp = dn_copy_flowqueues(fs, bp);
    }

    error = sooptcopyout(sopt, buf, size);
    kfree(buf, M_TEMP);
    return error;
}
/*
 * Handler for the various dummynet socket options (get, flush, config, del)
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
    struct dn_ioc_pipe tmp_ioc_pipe;
    int error = 0;

    /* Disallow sets in really-really secure mode. */
    if (sopt->sopt_dir == SOPT_SET) {
        if (securelevel >= 3)
            return EPERM;
    }

    switch (sopt->sopt_name) {
    case IP_DUMMYNET_GET:
        error = dummynet_get(sopt);
        break;

    case IP_DUMMYNET_FLUSH:
        dummynet_flush();
        break;

    case IP_DUMMYNET_CONFIGURE:
        error = sooptcopyin(sopt, &tmp_ioc_pipe, sizeof(tmp_ioc_pipe),
            sizeof(tmp_ioc_pipe));
        if (error)
            break;
        error = config_pipe(&tmp_ioc_pipe);
        break;

    case IP_DUMMYNET_DEL:    /* Remove a pipe or flow_set */
        error = sooptcopyin(sopt, &tmp_ioc_pipe, sizeof(tmp_ioc_pipe),
            sizeof(tmp_ioc_pipe));
        if (error)
            break;
        error = delete_pipe(&tmp_ioc_pipe);
        break;

    default:
        kprintf("%s -- unknown option %d\n", __func__, sopt->sopt_name);
        error = EINVAL;
        break;
    }
    return error;
}
static void
dummynet_clock(systimer_t info __unused, struct intrframe *frame __unused)
{
    KASSERT(mycpu->gd_cpuid == dn_cpu,
        ("systimer comes on a different cpu!\n"));

    crit_enter();
    if (dn_netmsg.nm_lmsg.ms_flags & MSGF_DONE)
        lwkt_sendmsg(cpu_portfn(mycpu->gd_cpuid), &dn_netmsg.nm_lmsg);
    crit_exit();
}
static int
sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
{
    int error, val;

    val = dn_hz;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error || req->newptr == NULL)
        return error;
    if (val <= 0)
        return EINVAL;
    else if (val > DUMMYNET_CALLOUT_FREQ_MAX)
        val = DUMMYNET_CALLOUT_FREQ_MAX;

    dn_hz = val;
    systimer_adjust_periodic(&dn_clock, val);

    return 0;
}
static void
ip_dn_register_systimer(struct netmsg *msg)
{
    systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
    lwkt_replymsg(&msg->nm_lmsg, 0);
}

static void
ip_dn_deregister_systimer(struct netmsg *msg)
{
    systimer_del(&dn_clock);
    lwkt_replymsg(&msg->nm_lmsg, 0);
}
static void
ip_dn_init(void)
{
    struct netmsg smsg;
    lwkt_port_t port;

    kprintf("DUMMYNET initialized (011031)\n");

    all_pipes = NULL;
    all_flow_sets = NULL;

    ready_heap.size = ready_heap.elements = 0;
    ready_heap.offset = 0;

    wfq_ready_heap.size = wfq_ready_heap.elements = 0;
    wfq_ready_heap.offset = 0;

    extract_heap.size = extract_heap.elements = 0;
    extract_heap.offset = 0;

    ip_dn_ctl_ptr = ip_dn_ctl;
    ip_dn_io_ptr = dummynet_io;
    ip_dn_ruledel_ptr = dn_rule_delete;

    netmsg_init(&dn_netmsg, &netisr_adone_rport, 0, dummynet);

    netmsg_init(&smsg, &curthread->td_msgport, 0, ip_dn_register_systimer);
    port = cpu_portfn(dn_cpu);
    lwkt_domsg(port, &smsg.nm_lmsg, 0);
}
static void
ip_dn_stop(void)
{
    struct netmsg smsg;
    lwkt_port_t port;

    netmsg_init(&smsg, &curthread->td_msgport, 0, ip_dn_deregister_systimer);
    port = cpu_portfn(dn_cpu);
    lwkt_domsg(port, &smsg.nm_lmsg, 0);

    ip_dn_ctl_ptr = NULL;
    ip_dn_io_ptr = NULL;
    ip_dn_ruledel_ptr = NULL;

    netmsg_service_sync();
}
static int
dummynet_modevent(module_t mod, int type, void *data)
{
    switch (type) {
    case MOD_LOAD:
        if (DUMMYNET_LOADED) {
            kprintf("DUMMYNET already loaded\n");
            return EEXIST;
        }
        ip_dn_init();
        break;

    case MOD_UNLOAD:
#ifndef KLD_MODULE
        kprintf("dummynet statically compiled, cannot unload\n");
        return EINVAL;
#else
        ip_dn_stop();
#endif
        break;

    default:
        break;
    }
    return 0;
}

static moduledata_t dummynet_mod = {
    "dummynet",
    dummynet_modevent,
    NULL
};
DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
MODULE_DEPEND(dummynet, ipfw, 1, 1, 1);
MODULE_VERSION(dummynet, 1);