altq/cbq: Clear poll-cache if the cached class is to be destroyed
sys/net/altq/altq_rmclass.c
/* @(#)rm_class.c 1.48 97/12/05 SMI */
/* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Network Research
 *	Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>

#include <net/altq/altq.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

#include <sys/thread2.h>

#ifdef CBQ_TRACE
static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
static struct cbqtrace *cbqtrace_ptr = NULL;
static int cbqtrace_count;
#endif

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, struct mbuf *);
static void	_rmc_dropq(rm_class_t *);
static struct mbuf *_rmc_getq(rm_class_t *);
static struct mbuf *_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(void *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class borrowing from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * the heuristics for setting the top-level variable (cutoff_) become:
 * 1. if a packet arrives for a not-overlimit class, set cutoff
 *    to the depth of the class.
 * 2. if cutoff is i, and a packet arrives for an overlimit class
 *    with an underlimit ancestor at a lower level than i (say j),
 *    then set cutoff to j.
 * 3. when scheduling a packet, if there is no underlimit class
 *    due to the current cutoff level, increase cutoff by 1 and
 *    then try to schedule again.
 */

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte	is the data rate of the interface in nanoseconds/byte.
 *		E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *		than 100% of the bandwidth, this number should be the
 *		'effective' rate for the class.  Let f be the
 *		bandwidth fraction allocated to this class, and let
 *		nsPerByte be the data rate of the output link in
 *		nanoseconds/byte.  Then nsecPerByte is set to
 *		nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *		for a class that gets 50% of an ethernet's bandwidth.
 *
 * action	the routine to call when the class is over limit.
 *
 * maxq		max allowable queue size for class (in packets).
 *
 * parent	parent class pointer.
 *
 * borrow	class to borrow from (should be either 'parent' or null).
 *
 * maxidle	max value allowed for class 'idle' time estimate (this
 *		parameter determines how large an initial burst of packets
 *		can be before overlimit action is invoked).
 *
 * offtime	how long 'delay' action will delay when class goes over
 *		limit (this parameter determines the steady-state burst
 *		size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *	ptime = s * nsPerByte * (1 - f) / f
 *	maxidle = ptime * (1 - g^b) / g^b
 *	minidle = -ptime * (1 / (f - 1))
 *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
 *
 * (A worked numeric example of this scaling follows the function below.)
 */
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
	     void (*action)(rm_class_t *, rm_class_t *), int maxq,
	     struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
	     int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cl->callout_);
	cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
					     qlimit(cl->q_) * 10/100,
					     qlimit(cl->q_) * 30/100,
					     red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
	crit_enter();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
	     int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat *ifd;
	u_int old_allotment;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	crit_enter();
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifd) - This function computes
 *	the appropriate weights for the CBQ weighted round-robin
 *	algorithm.
 *
 *	Returns:	NONE
 */

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int i;
	struct rm_class *cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
				     (ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
							   ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}
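
/*
 * Editor's note: an illustrative sketch (hypothetical numbers) of the
 * weight computation above.  Suppose priority i has two classes with
 * allotments of 625000 and 312500 bytes/sec (5Mb/s and 2.5Mb/s) and the
 * interface MTU (maxpkt_) is 1500:
 *
 *	M_[i] = (625000 + 312500) / (2 * 1500) = 312
 *	w_allotment_(cl1) = 625000 / 312 = 2003
 *	w_allotment_(cl2) = 312500 / 312 = 1001
 *
 * so cl1 may send roughly twice as many bytes per WRR round as cl2, in
 * proportion to its allotment.
 */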

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t *t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}
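
/*
 * Editor's note: for example (hypothetical tree), adding a new leaf under
 * parent P under root R walks upward: the leaf has depth_ 0, so P becomes
 * max(depth_(P), 0 + 1) = 1 and R becomes 2.  The walk stops as soon as an
 * ancestor is already deep enough, so unaffected branches are not touched.
 */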

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t *p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t *t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns:	NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class *p, *head, *previous;

	KKASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		callout_stop(&cl->callout_);

	crit_enter();

	if (ifd->pollcache_ == cl)
		ifd->pollcache_ = NULL;

	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 * re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			KKASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			KKASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	crit_exit();

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	kfree(cl->q_, M_ALTQ);
	kfree(cl, M_ALTQ);
}

/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */

void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
	 void (*restart)(struct ifaltq *), int maxq, int maxqueued,
	 u_int maxidle, int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero(ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
				  maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
	if (ifd->root_ == NULL) {
		kprintf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}
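
/*
 * Editor's note: a minimal usage sketch of the setup path above, from a
 * hypothetical driver/altq attach routine.  The names `my_ifdat',
 * `my_restart' and `my_cbq_attach' are assumptions for illustration, as
 * are the parameter values; 800 ns/byte corresponds to a 10Mb/s link, and
 * the zero maxidle/minidle/offtime values are placeholders (rmc_newclass()
 * clamps them to usable minimums).
 */
#if 0
static struct rm_ifdat my_ifdat;

static void
my_restart(struct ifaltq *ifq)
{
	/* kick the driver's if_start here */
}

static void
my_cbq_attach(struct ifaltq *ifq)
{
	/* 10Mb/s link, 30-packet root queue, one packet downstream */
	rmc_init(ifq, &my_ifdat, 800, my_restart, 30, 1,
		 0 /* maxidle */, 0 /* minidle */, 0 /* offtime */,
		 RMCF_WRR);
}
#endif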

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
{
	struct timeval now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int cpri = cl->pri_;
	int is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			       borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
					 cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int i;
	rm_class_t *p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p = cl;
	rm_class_t *top;
	struct rm_ifdat *ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		callout_stop(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static struct mbuf *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class *cl = NULL, *first = NULL;
	u_int deficit;
	int cpri;
	struct mbuf *m;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				} else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static struct mbuf *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct mbuf *m;
	int cpri;
	struct rm_class *cl, *first = NULL;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0 /* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * struct mbuf *
 * rmc_dequeue_next(struct rm_ifdat *ifd, int mode) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, a pointer to the next packet.
 */

struct mbuf *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}
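
/*
 * Editor's note: a sketch of the poll-then-remove contract implied by
 * pollcache_ above, as a hypothetical driver start routine might use it.
 * The driver peeks at the next packet (ALTDQ_POLL), checks that the
 * hardware has room for it, and only then dequeues it for real
 * (ALTDQ_REMOVE); the cache guarantees both calls return the same packet.
 * `my_driver_start' and `my_hw_has_room' are invented names for the sketch.
 */
#if 0
static void
my_driver_start(struct rm_ifdat *ifd)
{
	struct mbuf *m;

	m = rmc_dequeue_next(ifd, ALTDQ_POLL);	/* peek; caches the class */
	if (m == NULL)
		return;
	if (my_hw_has_room(m_pktlen(m))) {	/* hypothetical helper */
		m = rmc_dequeue_next(ifd, ALTDQ_REMOVE); /* same packet */
		/* hand m to the hardware, later rmc_update_class_util(ifd) */
	}
}
#endif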

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
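
/*
 * Editor's note: a quick check of the shift approximation above (not in
 * the original source).  1/1024 + 1/65536 + 1/131072 = 0.00099945..., so
 * NSEC_TO_USEC(t) ~= t * 0.00099945 rather than t / 1000 = t * 0.001,
 * about 0.05% low, which is ample for these time estimates.  E.g. for a
 * 1500-byte packet at 800 ns/byte, t = 1200000 ns: t / 1000 = 1200 usec,
 * while NSEC_TO_USEC(t) = 1171 + 18 + 9 = 1198 usec.
 */
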
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int idle, avgidle, pktlen;
	int pkt_time, tidle;
	rm_class_t *cl, *borrowed;
	rm_class_t *borrows;
	struct timeval *nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rmc_update_class_util is designed to be called when the
	 * transfer is completed from an xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1 /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1 /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				callout_stop(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}
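
/*
 * Editor's note on the filter above (values are an illustration): the
 * update avgidle += idle - (avgidle >> RM_FILTER_GAIN) keeps avgidle
 * scaled up by 2^RM_FILTER_GAIN, matching the scaling described above
 * rmc_newclass().  Assuming RM_FILTER_GAIN = 5, a class whose avgidle is
 * 320 and which observes idle = 0 decays to 320 - (320 >> 5) = 310;
 * repeated overlimit rounds drive avgidle down toward minidle_, repeated
 * idle rounds drive it up until it is clamped at maxidle_.
 */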

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns:	NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	KKASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being
 *	overlimit, then delay for the offtime of the borrowing class that
 *	is overlimit.
 *
 *	Returns:	NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > ustick * 2)
			t = (delay + ustick - 1) / ustick;
		else
			t = 2;
		callout_reset(&cl->callout_, t, rmc_restart, cl);
	}
}
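
/*
 * Editor's note: a worked example of the tick conversion above (the
 * numbers are assumptions).  With hz = 100, ustick is 10000 usec per
 * tick; a computed delay of 25000 usec gives t = (25000 + 9999) / 10000
 * = 3 ticks, while any delay of 20000 usec or less is clamped up to the
 * 2-tick minimum described in the comment.
 */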

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has to
 *	know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Since this function is called from an independent timeout, we
 *	have to set up the lock conditions expected for the ALTQ operation.
 *	Note that the restart will probably fall through to an if_start.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(void *arg)
{
	struct rm_class *cl = arg;
	struct rm_ifdat *ifd = cl->ifdat_;

	ALTQ_LOCK(ifd->ifq_);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	ALTQ_UNLOCK(ifd->ifq_);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link-sharing structure.
 *
 *	Returns:	NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 *	effects on the code of whether queues are red queues or droptail
 *	queues.
 */

static int
_rmc_addq(rm_class_t *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	struct mbuf *m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static struct mbuf *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}

static struct mbuf *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE
/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void *func;
	char *name;
} rmc_funcs[] = {
	rmc_init,		"rmc_init",
	rmc_queue_packet,	"rmc_queue_packet",
	rmc_under_limit,	"rmc_under_limit",
	rmc_update_class_util,	"rmc_update_class_util",
	rmc_delay_action,	"rmc_delay_action",
	rmc_restart,		"rmc_restart",
	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
	NULL,			NULL
};

static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++) {
		if (fp->func == func)
			return (fp->name);
	}

	return ("unknown");
}

void
cbqtrace_dump(int counter)
{
	int i, *p;
	char *cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i = 0; i < 20; i++) {
		kprintf("[0x%x] ", *p++);
		kprintf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		kprintf("%d\n", *p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */